From: Jonathan Cameron <jonathan.cameron@huawei.com>
Once a read or write reaches a CXL type 3 device, the HDM decoders
on the device are used to establish the Device Physical Address
which should be accessed. These functions perform the required maths
and then directly access the hostmem->mr to fulfill the actual
operation. Note that failed writes are silent, but failed reads
return poison. Note this is based loosely on:
https://lore.kernel.org/qemu-devel/20200817161853.593247-6-f4bug@amsat.org/
[RFC PATCH 0/9] hw/misc: Add support for interleaved memory accesses
Only lightly tested so far. More complex test cases yet to be written.
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
---
hw/mem/cxl_type3.c | 81 +++++++++++++++++++++++++++++++++++++
include/hw/cxl/cxl_device.h | 5 +++
2 files changed, 86 insertions(+)
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index b1ba4bf0de..064e8c942c 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -161,6 +161,87 @@ static void ct3_realize(PCIDevice *pci_dev, Error **errp)
&ct3d->cxl_dstate.device_registers);
}
+/* HPA -> DPA translation via HDM decoder 0. TODO: Support multiple HDM decoders and DPA skip */
+static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
+{
+ uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
+ uint64_t decoder_base, decoder_size, hpa_offset;
+ uint32_t hdm0_ctrl;
+ int ig, iw; /* encoded interleave granularity / ways fields */
+
+ decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
+ cache_mem[R_CXL_HDM_DECODER0_BASE_LO]); /* 64-bit base from the HI/LO register pair */
+ if ((uint64_t)host_addr < decoder_base) {
+ return false; /* host_addr is below decoder 0's window */
+ }
+
+ hpa_offset = (uint64_t)host_addr - decoder_base;
+
+ decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
+ cache_mem[R_CXL_HDM_DECODER0_SIZE_LO]; /* 64-bit size from the HI/LO register pair */
+ if (hpa_offset >= decoder_size) {
+ return false; /* host_addr is above decoder 0's window */
+ }
+
+ hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
+ iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW); /* interleave ways (encoded) */
+ ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG); /* interleave granularity (encoded) */
+
+ *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) | /* keep low 2^(ig+8) intra-granule bits ... */
+ ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset) >> iw); /* ... and squeeze out the iw way-select bits */
+
+ return true;
+}
+
+MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ CXLType3Dev *ct3d = CT3(d);
+ uint64_t dpa_offset;
+ MemoryRegion *mr;
+
+ /* TODO support volatile region */
+ mr = host_memory_backend_get_memory(ct3d->hostmem);
+ if (!mr) {
+ return MEMTX_ERROR; /* no backing memory: fail the read */
+ }
+
+ if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
+ return MEMTX_ERROR; /* access misses HDM decoder 0's window */
+ }
+
+ if (dpa_offset >= int128_get64(mr->size)) { /* was '>': dpa_offset == size is one past the end. NOTE(review): dpa_offset + size is still unchecked */
+ return MEMTX_ERROR;
+ }
+
+ return memory_region_dispatch_read(mr, dpa_offset, data,
+ size_memop(size), attrs);
+}
+
+MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
+ unsigned size, MemTxAttrs attrs)
+{
+ CXLType3Dev *ct3d = CT3(d);
+ uint64_t dpa_offset;
+ MemoryRegion *mr;
+
+ mr = host_memory_backend_get_memory(ct3d->hostmem);
+ if (!mr) {
+ return MEMTX_OK; /* no backing memory: drop the write silently */
+ }
+
+ if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
+ return MEMTX_OK; /* misses HDM decoder 0's window: silent discard */
+ }
+
+ if (dpa_offset >= int128_get64(mr->size)) { /* was '>': dpa_offset == size is one past the end. NOTE(review): dpa_offset + size is still unchecked */
+ return MEMTX_OK;
+ }
+
+ return memory_region_dispatch_write(mr, dpa_offset, data,
+ size_memop(size), attrs);
+}
+
static void ct3d_reset(DeviceState *dev)
{
CXLType3Dev *ct3d = CT3(dev);
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
index 43908f161b..83da5d4e8f 100644
--- a/include/hw/cxl/cxl_device.h
+++ b/include/hw/cxl/cxl_device.h
@@ -264,4 +264,9 @@ struct CXLType3Class {
uint64_t offset);
};
+MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs);
+MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
+ unsigned size, MemTxAttrs attrs);
+
#endif
--
2.32.0