[RFC PATCH 4/7] hw/cxl: Map lazy memory backend after host acceptance

Alireza Sanaee posted 7 patches 2 months, 1 week ago
[RFC PATCH 4/7] hw/cxl: Map lazy memory backend after host acceptance
Posted by Alireza Sanaee 2 months, 1 week ago
Map the relevant memory backend when the host has accepted an extent.

Signed-off-by: Alireza Sanaee <alireza.sanaee@huawei.com>
---
 hw/cxl/cxl-mailbox-utils.c | 74 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 71 insertions(+), 3 deletions(-)

diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index ae723c03ec..b785553225 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -2979,6 +2979,30 @@ static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
     return CXL_MBOX_SUCCESS;
 }
 
+static bool cxl_extent_find_extent_detail(CXLDCExtentGroupList *list,
+                                          uint64_t start_dpa,
+                                          uint64_t len,
+                                          uint8_t *tag,
+                                          HostMemoryBackend **hmb,
+                                          struct CXLFixedWindow **fw,
+                                          int *rid)
+{
+    CXLDCExtent *ent, *ent_next;
+    CXLDCExtentGroup *group = QTAILQ_FIRST(list);
+
+    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
+        if (ent->start_dpa == start_dpa && ent->len == len) {
+            *fw = ent->fw;
+            *hmb = ent->hm;
+            memcpy(tag, ent->tag, 0x10);
+            *rid = ent->rid;
+            return true;
+        }
+    }
+
+    return false;
+}
+
 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
         const CXLUpdateDCExtentListInPl *in)
 {
@@ -3029,8 +3053,12 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
     CXLUpdateDCExtentListInPl *in = (void *)payload_in;
     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
     CXLDCExtentList *extent_list = &ct3d->dc.extents;
+    struct CXLFixedWindow *fw;
+    HostMemoryBackend *hmb_dc;
+    uint8_t tag[0x10];
     uint32_t i, num;
     uint64_t dpa, len;
+    int rid;
     CXLRetCode ret;
 
     if (len_in < sizeof(*in)) {
@@ -3065,12 +3093,52 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
     }
 
     for (i = 0; i < in->num_entries_updated; i++) {
+        bool found;
+        MemoryRegion *mr;
+
         dpa = in->updated_entries[i].start_dpa;
         len = in->updated_entries[i].len;
 
-        cxl_insert_extent_to_extent_list(extent_list, NULL,
-                                         NULL, dpa, len,
-                                         NULL, 0, 0);
+        if (ct3d->dc.total_capacity_cmd) {
+            found = cxl_extent_find_extent_detail(
+                &ct3d->dc.extents_pending, dpa, len, tag, &hmb_dc, &fw, &rid);
+
+            /*
+             * This only occurs when host accepts an extent where device does
+             * not know anything about it.
+             */
+            if (!found) {
+                qemu_log("Could not find the extent detail for DPA 0x%" PRIx64
+                         " LEN 0x%" PRIx64 "\n",
+                         dpa, len);
+                return CXL_MBOX_INVALID_PA;
+            }
+
+            /* The host memory backend should not be already mapped */
+            if (host_memory_backend_is_mapped(hmb_dc)) {
+                qemu_log("The host memory backend for DPA 0x%" PRIx64
+                         " LEN 0x%" PRIx64 " is already mapped\n",
+                         dpa, len);
+                return CXL_MBOX_INVALID_PA;
+            }
+
+            mr = host_memory_backend_get_memory(hmb_dc);
+            if (!mr) {
+                qemu_log("Could not get memory region from host memory "
+                         "backend\n");
+                return CXL_MBOX_INVALID_PA;
+            }
+
+            memory_region_set_nonvolatile(mr, false);
+            memory_region_set_enabled(mr, true);
+            host_memory_backend_set_mapped(hmb_dc, true);
+            cxl_insert_extent_to_extent_list(extent_list, hmb_dc, fw, dpa, len,
+                                             NULL, 0, rid);
+        } else {
+            cxl_insert_extent_to_extent_list(extent_list, NULL, NULL, dpa, len,
+                                             NULL, 0, -1);
+        }
+
         ct3d->dc.total_extent_count += 1;
         ct3d->dc.nr_extents_accepted += 1;
         ct3_set_region_block_backed(ct3d, dpa, len);
-- 
2.43.0
Re: [RFC PATCH 4/7] hw/cxl: Map lazy memory backend after host acceptance
Posted by Jonathan Cameron via qemu development 14 hours ago
On Thu, 27 Nov 2025 22:55:22 +0000
Alireza Sanaee <alireza.sanaee@huawei.com> wrote:

> Map relevant memory backend when host accepted an extent.

Explain what works at this point.  Does the old non-performant
read / write path land in this memory after this patch?

We could decide not to support that, but the key point is that the
patch should explain where things stand at this stage of the series.

No comments inline.

> 
> Signed-off-by: Alireza Sanaee <alireza.sanaee@huawei.com>
> ---
>  hw/cxl/cxl-mailbox-utils.c | 74 ++++++++++++++++++++++++++++++++++++--
>  1 file changed, 71 insertions(+), 3 deletions(-)
> 
> diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
> index ae723c03ec..b785553225 100644
> --- a/hw/cxl/cxl-mailbox-utils.c
> +++ b/hw/cxl/cxl-mailbox-utils.c
> @@ -2979,6 +2979,30 @@ static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
>      return CXL_MBOX_SUCCESS;
>  }
>  
> +static bool cxl_extent_find_extent_detail(CXLDCExtentGroupList *list,
> +                                          uint64_t start_dpa,
> +                                          uint64_t len,
> +                                          uint8_t *tag,
> +                                          HostMemoryBackend **hmb,
> +                                          struct CXLFixedWindow **fw,
> +                                          int *rid)
> +{
> +    CXLDCExtent *ent, *ent_next;
> +    CXLDCExtentGroup *group = QTAILQ_FIRST(list);
> +
> +    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
> +        if (ent->start_dpa == start_dpa && ent->len == len) {
> +            *fw = ent->fw;
> +            *hmb = ent->hm;
> +            memcpy(tag, ent->tag, 0x10);
> +            *rid = ent->rid;
> +            return true;
> +        }
> +    }
> +
> +    return false;
> +}
> +
>  static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
>          const CXLUpdateDCExtentListInPl *in)
>  {
> @@ -3029,8 +3053,12 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
>      CXLUpdateDCExtentListInPl *in = (void *)payload_in;
>      CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
>      CXLDCExtentList *extent_list = &ct3d->dc.extents;
> +    struct CXLFixedWindow *fw;
> +    HostMemoryBackend *hmb_dc;
> +    uint8_t tag[0x10];
>      uint32_t i, num;
>      uint64_t dpa, len;
> +    int rid;
>      CXLRetCode ret;
>  
>      if (len_in < sizeof(*in)) {
> @@ -3065,12 +3093,52 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
>      }
>  
>      for (i = 0; i < in->num_entries_updated; i++) {
> +        bool found;
> +        MemoryRegion *mr;
> +
>          dpa = in->updated_entries[i].start_dpa;
>          len = in->updated_entries[i].len;
>  
> -        cxl_insert_extent_to_extent_list(extent_list, NULL,
> -                                         NULL, dpa, len,
> -                                         NULL, 0, 0);
> +        if (ct3d->dc.total_capacity_cmd) {
> +            found = cxl_extent_find_extent_detail(
> +                &ct3d->dc.extents_pending, dpa, len, tag, &hmb_dc, &fw, &rid);
> +
> +            /*
> +             * This only occurs when host accepts an extent where device does
> +             * not know anything about it.
> +             */
> +            if (!found) {
> +                qemu_log("Could not find the extent detail for DPA 0x%" PRIx64
> +                         " LEN 0x%" PRIx64 "\n",
> +                         dpa, len);
> +                return CXL_MBOX_INVALID_PA;
> +            }
> +
> +            /* The host memory backend should not be already mapped */
> +            if (host_memory_backend_is_mapped(hmb_dc)) {
> +                qemu_log("The host memory backend for DPA 0x%" PRIx64
> +                         " LEN 0x%" PRIx64 " is already mapped\n",
> +                         dpa, len);
> +                return CXL_MBOX_INVALID_PA;
> +            }
> +
> +            mr = host_memory_backend_get_memory(hmb_dc);
> +            if (!mr) {
> +                qemu_log("Could not get memory region from host memory "
> +                         "backend\n");
> +                return CXL_MBOX_INVALID_PA;
> +            }
> +
> +            memory_region_set_nonvolatile(mr, false);
> +            memory_region_set_enabled(mr, true);
> +            host_memory_backend_set_mapped(hmb_dc, true);
> +            cxl_insert_extent_to_extent_list(extent_list, hmb_dc, fw, dpa, len,
> +                                             NULL, 0, rid);
> +        } else {
> +            cxl_insert_extent_to_extent_list(extent_list, NULL, NULL, dpa, len,
> +                                             NULL, 0, -1);
> +        }
> +
>          ct3d->dc.total_extent_count += 1;
>          ct3d->dc.nr_extents_accepted += 1;
>          ct3_set_region_block_backed(ct3d, dpa, len);