After CXL completes trimming SOFT RESERVED ranges that intersect with
CXL regions, it invokes hmem_fallback_register_device() to register any
leftover ranges. If this happens before the DAX HMEM driver has
initialized, the call becomes a no-op and those resources are lost.

To prevent this, store fallback-registered resources in a separate
deferred tree (hmem_deferred_active). When the DAX HMEM driver
initializes, it walks this deferred tree and registers DAX devices for
the saved ranges.

Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
---
 drivers/dax/hmem/device.c      | 17 +++++++++++++----
 drivers/dax/hmem/hmem.c        |  1 -
 drivers/dax/hmem/hmem_notify.c |  2 ++
 3 files changed, 15 insertions(+), 5 deletions(-)
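
Note for reviewers: the defer-then-replay flow described above can be
modeled in isolation as below. This is a plain userspace C sketch with
hypothetical names (fallback_register, install_handler, struct range);
it only illustrates the pattern, whereas the patch itself keeps deferred
ranges in the hmem_deferred_active resource tree and replays them via
walk_hmem_resources() once the DAX HMEM driver registers its handler.

/*
 * Standalone sketch of the deferral pattern; names are hypothetical
 * stand-ins, not the kernel interfaces touched by this patch.
 */
#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long long start;
	unsigned long long end;
	int target_nid;
	struct range *next;
};

/* Handler installed once the "driver" is ready; NULL until then. */
typedef void (*register_fn)(int target_nid, unsigned long long start,
			    unsigned long long end);
static register_fn registered_handler;

/* Ranges that arrived before a handler existed. */
static struct range *deferred_head;

/* Fallback entry point: invoke the handler, or defer the range. */
static void fallback_register(int target_nid, unsigned long long start,
			      unsigned long long end)
{
	struct range *r;

	if (registered_handler) {
		registered_handler(target_nid, start, end);
		return;
	}

	r = malloc(sizeof(*r));
	if (!r)
		return;
	*r = (struct range){ .start = start, .end = end,
			     .target_nid = target_nid, .next = deferred_head };
	deferred_head = r;
}

/* Called when the handler finally registers: drain the deferred list. */
static void install_handler(register_fn fn)
{
	registered_handler = fn;
	while (deferred_head) {
		struct range *r = deferred_head;

		deferred_head = r->next;
		fn(r->target_nid, r->start, r->end);
		free(r);
	}
}

static void register_device(int target_nid, unsigned long long start,
			    unsigned long long end)
{
	printf("register nid %d: [%#llx-%#llx]\n", target_nid, start, end);
}

int main(void)
{
	/* Range arrives before any handler is installed: it is deferred. */
	fallback_register(1, 0x100000000ULL, 0x17fffffffULL);

	/* Handler installation replays the deferred range. */
	install_handler(register_device);

	/* Later ranges go straight to the handler. */
	fallback_register(2, 0x180000000ULL, 0x1ffffffffULL);
	return 0;
}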
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cc1ed7bbdb1a..41c5886a30d1 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -16,13 +16,21 @@ static struct resource hmem_active = {
 	.flags = IORESOURCE_MEM,
 };
 
+static struct resource hmem_deferred_active = {
+	.name = "Deferred HMEM devices",
+	.start = 0,
+	.end = -1,
+	.flags = IORESOURCE_MEM,
+};
+static struct resource *hmem_resource_root = &hmem_active;
+
 int walk_hmem_resources(walk_hmem_fn fn)
 {
 	struct resource *res;
 	int rc = 0;
 
 	mutex_lock(&hmem_resource_lock);
-	for (res = hmem_active.child; res; res = res->sibling) {
+	for (res = hmem_resource_root->child; res; res = res->sibling) {
 		rc = fn((int) res->desc, res);
 		if (rc)
 			break;
@@ -36,8 +44,8 @@ static void __hmem_register_resource(int target_nid, struct resource *res)
 {
 	struct resource *new;
 
-	new = __request_region(&hmem_active, res->start, resource_size(res), "",
-			       0);
+	new = __request_region(hmem_resource_root, res->start,
+			       resource_size(res), "", 0);
 	if (!new) {
 		pr_debug("hmem range %pr already active\n", res);
 		return;
@@ -72,7 +80,8 @@ static __init int hmem_init(void)
 		walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
 				    IORESOURCE_MEM, 0, -1, NULL,
 				    hmem_register_one);
-	}
+	} else
+		hmem_resource_root = &hmem_deferred_active;
 
 	pdev = platform_device_alloc("hmem_platform", 0);
 	if (!pdev) {
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 16873ae0a53b..76a381c274a8 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -131,7 +131,6 @@ static int dax_hmem_platform_probe(struct platform_device *pdev)
 
 	if (IS_ENABLED(CONFIG_CXL_ACPI)) {
 		hmem_register_fallback_handler(hmem_register_device);
-		return 0;
 	}
 
 	return walk_hmem_resources(hmem_register_device);
diff --git a/drivers/dax/hmem/hmem_notify.c b/drivers/dax/hmem/hmem_notify.c
index 1b366ffbda66..6c276c5bd51d 100644
--- a/drivers/dax/hmem/hmem_notify.c
+++ b/drivers/dax/hmem/hmem_notify.c
@@ -23,5 +23,7 @@ void hmem_fallback_register_device(int target_nid, const struct resource *res)
 
 	if (hmem_fn)
 		hmem_fn(target_nid, res);
+	else
+		hmem_register_resource(target_nid, (struct resource *)res);
 }
 EXPORT_SYMBOL_GPL(hmem_fallback_register_device);
--
2.17.1