devm_cxl_pmem_add_region() is used to create a CXL region based on region
information scanned from the LSA.

devm_cxl_add_region() only allocates the cxlr; its fields are filled in
later by a userspace tool via the device attributes (*_store()).

devm_cxl_pmem_add_region() takes its inspiration from those device
attribute (*_store()) calls. It allocates the cxlr, fills in the
information parsed from the LSA, and calls device_add(&cxlr->dev) to
trigger the subsequent region creation probes.

Rename __create_region() to cxl_create_region() and make it an exported
routine. A later patch will use it to create a CXL region after fetching
region information from the LSA.
Signed-off-by: Neeraj Kumar <s.neeraj@samsung.com>
---
drivers/cxl/core/region.c | 127 ++++++++++++++++++++++++++++++++++++--
drivers/cxl/cxl.h | 12 ++++
2 files changed, 134 insertions(+), 5 deletions(-)
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index c325aa827992..d5c227ce7b09 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2573,6 +2573,116 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
return ERR_PTR(rc);
}
+static ssize_t alloc_region_hpa(struct cxl_region *cxlr, u64 size)
+{
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+ rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem);
+ if (rc)
+ return rc;
+
+ if (!size)
+ return -EINVAL;
+
+ return alloc_hpa(cxlr, size);
+}
+
+static ssize_t alloc_region_dpa(struct cxl_endpoint_decoder *cxled, u64 size)
+{
+ int rc;
+
+ if (!size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size, SZ_256M))
+ return -EINVAL;
+
+ rc = cxl_dpa_free(cxled);
+ if (rc)
+ return rc;
+
+ return cxl_dpa_alloc(cxled, size);
+}
+
+static struct cxl_region *
+devm_cxl_pmem_add_region(struct cxl_root_decoder *cxlrd, int id,
+ enum cxl_partition_mode mode,
+ enum cxl_decoder_type type,
+ struct cxl_pmem_region_params *params,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_endpoint_decoder *cxled;
+ struct cxl_region_params *p;
+ struct cxl_port *root_port;
+ struct device *dev;
+ int rc;
+
+ struct cxl_region *cxlr __free(put_cxl_region) =
+ cxl_region_alloc(cxlrd, id);
+ if (IS_ERR(cxlr))
+ return cxlr;
+
+ cxlr->mode = mode;
+ cxlr->type = type;
+
+ dev = &cxlr->dev;
+ rc = dev_set_name(dev, "region%d", id);
+ if (rc)
+ return ERR_PTR(rc);
+
+ p = &cxlr->params;
+ p->uuid = params->uuid;
+ p->interleave_ways = params->nlabel;
+ p->interleave_granularity = params->ig;
+
+ rc = alloc_region_hpa(cxlr, params->rawsize);
+ if (rc)
+ return ERR_PTR(rc);
+
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+
+ rc = cxl_dpa_set_part(cxled, CXL_PARTMODE_PMEM);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = alloc_region_dpa(cxled, params->rawsize);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /*
+ * TODO: Only interleave_ways == 1 is currently supported, i.e. one
+ * region per mem device. The mem device position (params->position)
+ * is therefore always 0, and only a single target is attached at
+ * params->position.
+ */
+ if (params->position)
+ return ERR_PTR(-EINVAL);
+
+ rc = attach_target(cxlr, cxled, params->position, TASK_INTERRUPTIBLE);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = __commit(cxlr);
+ if (rc)
+ return ERR_PTR(rc);
+
+ rc = device_add(dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ root_port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
+ rc = devm_add_action_or_reset(root_port->uport_dev,
+ unregister_region, cxlr);
+ if (rc)
+ return ERR_PTR(rc);
+
+ dev_dbg(root_port->uport_dev, "%s: created %s\n",
+ dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
+
+ return no_free_ptr(cxlr);
+}
+
static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
{
return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
@@ -2590,8 +2700,10 @@ static ssize_t create_ram_region_show(struct device *dev,
return __create_region_show(to_cxl_root_decoder(dev), buf);
}
-static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
- enum cxl_partition_mode mode, int id)
+struct cxl_region *cxl_create_region(struct cxl_root_decoder *cxlrd,
+ enum cxl_partition_mode mode, int id,
+ struct cxl_pmem_region_params *pmem_params,
+ struct cxl_decoder *cxld)
{
int rc;
@@ -2613,8 +2725,12 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
return ERR_PTR(-EBUSY);
}
+ if (pmem_params)
+ return devm_cxl_pmem_add_region(cxlrd, id, mode,
+ CXL_DECODER_HOSTONLYMEM, pmem_params, cxld);
return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
+EXPORT_SYMBOL_NS_GPL(cxl_create_region, "CXL");
static ssize_t create_region_store(struct device *dev, const char *buf,
size_t len, enum cxl_partition_mode mode)
@@ -2627,7 +2743,7 @@ static ssize_t create_region_store(struct device *dev, const char *buf,
if (rc != 1)
return -EINVAL;
- cxlr = __create_region(cxlrd, mode, id);
+ cxlr = cxl_create_region(cxlrd, mode, id, NULL, NULL);
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
@@ -3523,8 +3639,9 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
struct cxl_region *cxlr;
do {
- cxlr = __create_region(cxlrd, cxlds->part[part].mode,
- atomic_read(&cxlrd->region_id));
+ cxlr = cxl_create_region(cxlrd, cxlds->part[part].mode,
+ atomic_read(&cxlrd->region_id),
+ NULL, NULL);
} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
if (IS_ERR(cxlr)) {
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b57597e55f7e..3abadc3dc82e 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -874,6 +874,10 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
void cxl_region_discovery(struct cxl_port *port);
+struct cxl_region *cxl_create_region(struct cxl_root_decoder *cxlrd,
+ enum cxl_partition_mode mode, int id,
+ struct cxl_pmem_region_params *params,
+ struct cxl_decoder *cxld);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
@@ -899,6 +903,14 @@ static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
static inline void cxl_region_discovery(struct cxl_port *port)
{
}
+static inline struct cxl_region *
+cxl_create_region(struct cxl_root_decoder *cxlrd,
+ enum cxl_partition_mode mode, int id,
+ struct cxl_pmem_region_params *params,
+ struct cxl_decoder *cxld)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
--
2.34.1
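
For a sense of how the later LSA-scanning patch might use the newly
exported cxl_create_region(), here is a rough, hypothetical caller
sketch; region_uuid, label_ig, label_size, cxlrd and cxled all stand in
for data and objects the LSA parsing code would provide:

	/* Hypothetical caller: values come from a parsed LSA region label */
	struct cxl_pmem_region_params params = {
		.uuid = region_uuid,	/* UUID from the LSA region label */
		.nlabel = 1,		/* only interleave_ways == 1 for now */
		.ig = label_ig,		/* interleave granularity from the label */
		.rawsize = label_size,	/* region size from the label */
		.position = 0,		/* single-target region */
	};
	struct cxl_region *cxlr;

	cxlr = cxl_create_region(cxlrd, CXL_PARTMODE_PMEM,
				 atomic_read(&cxlrd->region_id),
				 &params, &cxled->cxld);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);
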
On 9/17/25 6:41 AM, Neeraj Kumar wrote:
> devm_cxl_pmem_add_region() is used to create a CXL region based on region
> information scanned from the LSA.

[..]

> +static ssize_t alloc_region_hpa(struct cxl_region *cxlr, u64 size)
> +{
> +	int rc;
> +
> +	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
> +	rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem);
> +	if (rc)
> +		return rc;

Just a nit. Please conform to the existing style in the subsystem for
this new usage:

+	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
+	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
+		return rc;

> +
> +	if (!size)
> +		return -EINVAL;
> +
> +	return alloc_hpa(cxlr, size);
> +}

I think you can create another helper, free_region_hpa(), and call both
helpers from size_store() to remove the duplicate code.

[..]

> +static struct cxl_region *
> +devm_cxl_pmem_add_region(struct cxl_root_decoder *cxlrd, int id,
> +			 enum cxl_partition_mode mode,

Wouldn't the mode parameter be unnecessary, since it would always be
CXL_PARTMODE_PMEM? I also wonder if we need to rename
devm_cxl_add_region() to devm_cxl_add_ram_region() to be explicit.

[..]

> +	if (params->position)
> +		return ERR_PTR(-EINVAL);

EOPNOTSUPP? Speaking of which, are there plans to support interleave in
the near future?

DJ
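
A minimal sketch of the free_region_hpa() helper suggested above might
look like the following, assuming the existing free_hpa() helper in
region.c and the same ACQUIRE() locking pattern as alloc_region_hpa();
the name and placement are illustrative only:

static ssize_t free_region_hpa(struct cxl_region *cxlr)
{
	int rc;

	/* same kill-able write lock as alloc_region_hpa() */
	ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region);
	if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem)))
		return rc;

	/* free_hpa() is assumed to be the existing HPA release path in region.c */
	return free_hpa(cxlr);
}

size_store() could then dispatch to alloc_region_hpa() or
free_region_hpa() depending on whether the written size is zero, instead
of open-coding the locking in both paths.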