drivers/nvme/target/admin-cmd.c | 7 +++---- drivers/nvme/target/configfs.c | 27 +++++++++++++++++++++++++++ drivers/nvme/target/core.c | 8 ++++++++ drivers/nvme/target/nvmet.h | 2 ++ 4 files changed, 40 insertions(+), 4 deletions(-)
Using this port configuration, one will be able to set the Maximum Data
Transfer Size (MDTS) for any controller that will be associated to the
configured port.
The default value stays 0 (no limit), but each transport will be able to
set its own value before enabling the port.
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
---
drivers/nvme/target/admin-cmd.c | 7 +++----
drivers/nvme/target/configfs.c | 27 +++++++++++++++++++++++++++
drivers/nvme/target/core.c | 8 ++++++++
drivers/nvme/target/nvmet.h | 2 ++
4 files changed, 40 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ca5b08ce1211..057ac62f6f63 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -687,11 +687,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
NVME_CTRL_CMIC_ANA;
- /* Limit MDTS according to transport capability */
+ /* Limit MDTS according to port config or transport capability */
+ id->mdts = req->port->mdts;
if (ctrl->ops->get_mdts)
- id->mdts = ctrl->ops->get_mdts(ctrl);
- else
- id->mdts = 0;
+ id->mdts = min_not_zero(ctrl->ops->get_mdts(ctrl), id->mdts);
id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 3088e044dbcb..63d72fbf4d9d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -302,6 +302,31 @@ static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+static ssize_t nvmet_param_mdts_show(struct config_item *item, char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->mdts);
+}
+
+static ssize_t nvmet_param_mdts_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->mdts);
+ if (ret) {
+ pr_err("Invalid value '%s' for mdts\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_mdts);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -1996,6 +2021,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
&nvmet_attr_param_max_queue_size,
+ &nvmet_attr_param_mdts,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -2054,6 +2080,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->max_queue_size = -1; /* < 0 == let the transport choose */
+ port->mdts = -1; /* < 0 == let the transport choose */
port->disc_addr.trtype = NVMF_TRTYPE_MAX;
port->disc_addr.portid = cpu_to_le16(portid);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 9238e13bd480..779d8a130619 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -370,6 +370,14 @@ int nvmet_enable_port(struct nvmet_port *port)
NVMET_MIN_QUEUE_SIZE,
NVMET_MAX_QUEUE_SIZE);
+ /*
+ * If the transport didn't set the mdts properly, then clamp it to the
+ * target limits. Also set default values in case the transport didn't
+ * set it at all.
+ */
+ if (port->mdts < 0 || port->mdts > NVMET_MAX_MDTS)
+ port->mdts = 0;
+
port->enabled = true;
port->tr_ops = ops;
return 0;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 319d6a5e9cf0..90ca10cd9438 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -214,6 +214,7 @@ struct nvmet_port {
bool enabled;
int inline_data_size;
int max_queue_size;
+ int mdts;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -671,6 +672,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
+#define NVMET_MAX_MDTS 255
/*
* Nice round number that makes a list of nsids fit into a page.
--
2.51.0
On Wed, Apr 01, 2026 at 10:13:55AM +0000, Aurelien Aptel wrote:
> Using this port configuration, one will be able to set the Maximum Data
> Transfer Size (MDTS) for any controller that will be associated to the
> configured port.
>
> The default value stayed 0 (no limit) but each transport will be able to
> set its own values before enabling the port.

The ZASL calculation in zns.c also needs to take this limit into account.
So maybe add a helper to calculate it?
Christoph Hellwig <hch@lst.de> writes:
> The ZASL calculation in zns.c also needs to take this limit into account.
> So maybe add a helper to calculate it?

Ok, I will make an inline helper in nvmet.h and use it in both places.
Thanks
© 2016 - 2026 Red Hat, Inc.