From: Cosmin Ratiu <cratiu@nvidia.com>
E-Switch QoS domains were added with the intention of eventually
implementing shared qos domains to support cross-esw scheduling in the
previous approach ([1]), but they are no longer necessary in the new
approach.
Remove QoS domains and switch to using the shd lock for protecting
against concurrent QoS modifications.
Enable the supported_cross_device_rate_nodes devlink ops attribute so
that all calls originating from devlink rate acquire the shd lock. Only
the remaining entry points into QoS (those not coming from devlink rate)
need to acquire the shd lock explicitly.
Enabling supported_cross_device_rate_nodes now is safe, because
mlx5_esw_qos_vport_update_parent rejects cross-esw parent updates.
This will change in the next patch.
[1] https://lore.kernel.org/netdev/20250213180134.323929-1-tariqt@nvidia.com/
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/devlink.c | 1 +
.../net/ethernet/mellanox/mlx5/core/esw/qos.c | 186 ++++--------------
.../net/ethernet/mellanox/mlx5/core/esw/qos.h | 3 -
.../net/ethernet/mellanox/mlx5/core/eswitch.c | 8 -
.../net/ethernet/mellanox/mlx5/core/eswitch.h | 9 +-
5 files changed, 45 insertions(+), 162 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 6698ac55a4bf..c051605fecd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -385,6 +385,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_del = mlx5_esw_devlink_rate_node_del,
.rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
.rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
+ .supported_cross_device_rate_nodes = true,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 0be516003bcd..f67f99428959 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -11,51 +11,9 @@
/* Minimum supported BW share value by the HW is 1 Mbit/sec */
#define MLX5_MIN_BW_SHARE 1
-/* Holds rate nodes associated with an E-Switch. */
-struct mlx5_qos_domain {
- /* Serializes access to all qos changes in the qos domain. */
- struct mutex lock;
-};
-
-static void esw_qos_lock(struct mlx5_eswitch *esw)
-{
- mutex_lock(&esw->qos.domain->lock);
-}
-
-static void esw_qos_unlock(struct mlx5_eswitch *esw)
-{
- mutex_unlock(&esw->qos.domain->lock);
-}
-
static void esw_assert_qos_lock_held(struct mlx5_eswitch *esw)
{
- lockdep_assert_held(&esw->qos.domain->lock);
-}
-
-static struct mlx5_qos_domain *esw_qos_domain_alloc(void)
-{
- struct mlx5_qos_domain *qos_domain;
-
- qos_domain = kzalloc_obj(*qos_domain);
- if (!qos_domain)
- return NULL;
-
- mutex_init(&qos_domain->lock);
-
- return qos_domain;
-}
-
-static int esw_qos_domain_init(struct mlx5_eswitch *esw)
-{
- esw->qos.domain = esw_qos_domain_alloc();
-
- return esw->qos.domain ? 0 : -ENOMEM;
-}
-
-static void esw_qos_domain_release(struct mlx5_eswitch *esw)
-{
- kfree(esw->qos.domain);
- esw->qos.domain = NULL;
+ devl_assert_locked(esw->dev->shd);
}
enum sched_node_type {
@@ -1110,7 +1068,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
struct mlx5_esw_sched_node *parent;
lockdep_assert_held(&esw->state_lock);
- esw_qos_lock(esw);
+ devl_lock(esw->dev->shd);
if (!vport->qos.sched_node)
goto unlock;
@@ -1120,7 +1078,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
mlx5_esw_qos_vport_disable_locked(vport);
unlock:
- esw_qos_unlock(esw);
+ devl_unlock(esw->dev->shd);
}
static int mlx5_esw_qos_set_vport_max_rate(struct mlx5_vport *vport, u32 max_rate,
@@ -1159,26 +1117,25 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
int err;
- esw_qos_lock(esw);
+ devl_lock(esw->dev->shd);
err = mlx5_esw_qos_set_vport_min_rate(vport, min_rate, NULL);
if (!err)
err = mlx5_esw_qos_set_vport_max_rate(vport, max_rate, NULL);
- esw_qos_unlock(esw);
+ devl_unlock(esw->dev->shd);
return err;
}
bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate)
{
- struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
bool enabled;
- esw_qos_lock(esw);
+ devl_lock(vport->dev->shd);
enabled = !!vport->qos.sched_node;
if (enabled) {
*max_rate = vport->qos.sched_node->max_rate;
*min_rate = vport->qos.sched_node->min_rate;
}
- esw_qos_unlock(esw);
+ devl_unlock(vport->dev->shd);
return enabled;
}
@@ -1513,9 +1470,9 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
return err;
}
- esw_qos_lock(esw);
+ devl_lock(esw->dev->shd);
err = mlx5_esw_qos_set_vport_max_rate(vport, rate_mbps, NULL);
- esw_qos_unlock(esw);
+ devl_unlock(esw->dev->shd);
return err;
}
@@ -1604,44 +1561,24 @@ static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
mlx5_esw_qos_vport_disable_locked(vport);
}
-int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
-{
- if (esw->qos.domain)
- return 0; /* Nothing to change. */
-
- return esw_qos_domain_init(esw);
-}
-
-void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw)
-{
- if (esw->qos.domain)
- esw_qos_domain_release(esw);
-}
-
/* Eswitch devlink rate API */
int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
struct mlx5_vport *vport = priv;
- struct mlx5_eswitch *esw;
int err;
- esw = vport->dev->priv.eswitch;
- if (!mlx5_esw_allowed(esw))
+ if (!mlx5_esw_allowed(vport->dev->priv.eswitch))
return -EPERM;
err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_share", &tx_share, extack);
if (err)
return err;
- esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
- if (err)
- goto out;
- esw_vport_qos_prune_empty(vport);
-out:
- esw_qos_unlock(esw);
+ if (!err)
+ esw_vport_qos_prune_empty(vport);
return err;
}
@@ -1649,24 +1586,18 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
u64 tx_max, struct netlink_ext_ack *extack)
{
struct mlx5_vport *vport = priv;
- struct mlx5_eswitch *esw;
int err;
- esw = vport->dev->priv.eswitch;
- if (!mlx5_esw_allowed(esw))
+ if (!mlx5_esw_allowed(vport->dev->priv.eswitch))
return -EPERM;
err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_max", &tx_max, extack);
if (err)
return err;
- esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
- if (err)
- goto out;
- esw_vport_qos_prune_empty(vport);
-out:
- esw_qos_unlock(esw);
+ if (!err)
+ esw_vport_qos_prune_empty(vport);
return err;
}
@@ -1677,34 +1608,30 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
{
struct mlx5_esw_sched_node *vport_node;
struct mlx5_vport *vport = priv;
- struct mlx5_eswitch *esw;
bool disable;
int err = 0;
- esw = vport->dev->priv.eswitch;
- if (!mlx5_esw_allowed(esw))
+ if (!mlx5_esw_allowed(vport->dev->priv.eswitch))
return -EPERM;
disable = esw_qos_tc_bw_disabled(tc_bw);
- esw_qos_lock(esw);
if (!esw_qos_vport_validate_unsupported_tc_bw(vport, tc_bw)) {
NL_SET_ERR_MSG_MOD(extack,
"E-Switch traffic classes number is not supported");
- err = -EOPNOTSUPP;
- goto unlock;
+ return -EOPNOTSUPP;
}
vport_node = vport->qos.sched_node;
if (disable && !vport_node)
- goto unlock;
+ return 0;
if (disable) {
if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
vport_node->parent, extack);
esw_vport_qos_prune_empty(vport);
- goto unlock;
+ return err;
}
if (!vport_node) {
@@ -1719,8 +1646,6 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
}
if (!err)
esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
-unlock:
- esw_qos_unlock(esw);
return err;
}
@@ -1730,28 +1655,22 @@ int mlx5_esw_devlink_rate_node_tc_bw_set(struct devlink_rate *rate_node,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node = priv;
- struct mlx5_eswitch *esw = node->esw;
bool disable;
int err;
- if (!esw_qos_validate_unsupported_tc_bw(esw, tc_bw)) {
+ if (!esw_qos_validate_unsupported_tc_bw(node->esw, tc_bw)) {
NL_SET_ERR_MSG_MOD(extack,
"E-Switch traffic classes number is not supported");
return -EOPNOTSUPP;
}
disable = esw_qos_tc_bw_disabled(tc_bw);
- esw_qos_lock(esw);
- if (disable) {
- err = esw_qos_node_disable_tc_arbitration(node, extack);
- goto unlock;
- }
+ if (disable)
+ return esw_qos_node_disable_tc_arbitration(node, extack);
err = esw_qos_node_enable_tc_arbitration(node, extack);
if (!err)
esw_qos_set_tc_arbiter_bw_shares(node, tc_bw, extack);
-unlock:
- esw_qos_unlock(esw);
return err;
}
@@ -1759,17 +1678,14 @@ int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void
u64 tx_share, struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node = priv;
- struct mlx5_eswitch *esw = node->esw;
int err;
- err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_share", &tx_share, extack);
+ err = esw_qos_devlink_rate_to_mbps(node->esw->dev, "tx_share",
+ &tx_share, extack);
if (err)
return err;
- esw_qos_lock(esw);
- err = esw_qos_set_node_min_rate(node, tx_share, extack);
- esw_qos_unlock(esw);
- return err;
+ return esw_qos_set_node_min_rate(node, tx_share, extack);
}
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
@@ -1783,10 +1699,7 @@ int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *
if (err)
return err;
- esw_qos_lock(esw);
- err = esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack);
- esw_qos_unlock(esw);
- return err;
+ return esw_qos_sched_elem_config(node, tx_max, node->bw_share, extack);
}
int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
@@ -1794,30 +1707,23 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
{
struct mlx5_esw_sched_node *node;
struct mlx5_eswitch *esw;
- int err = 0;
esw = mlx5_devlink_eswitch_get(rate_node->devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- esw_qos_lock(esw);
if (esw->mode != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
"Rate node creation supported only in switchdev mode");
- err = -EOPNOTSUPP;
- goto unlock;
+ return -EOPNOTSUPP;
}
node = esw_qos_create_vports_sched_node(esw, extack);
- if (IS_ERR(node)) {
- err = PTR_ERR(node);
- goto unlock;
- }
+ if (IS_ERR(node))
+ return PTR_ERR(node);
*priv = node;
-unlock:
- esw_qos_unlock(esw);
- return err;
+ return 0;
}
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
@@ -1826,10 +1732,9 @@ int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct mlx5_esw_sched_node *node = priv;
struct mlx5_eswitch *esw = node->esw;
- esw_qos_lock(esw);
__esw_qos_destroy_node(node, extack);
esw_qos_put(esw);
- esw_qos_unlock(esw);
+
return 0;
}
@@ -1846,7 +1751,6 @@ mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport,
return -EOPNOTSUPP;
}
- esw_qos_lock(esw);
if (!vport->qos.sched_node && parent) {
enum sched_node_type type;
@@ -1859,13 +1763,15 @@ mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport,
parent ? : esw->qos.root,
extack);
}
- esw_qos_unlock(esw);
+
return err;
}
void mlx5_esw_qos_vport_clear_parent(struct mlx5_vport *vport)
{
+ devl_lock(vport->dev->shd);
mlx5_esw_qos_vport_update_parent(vport, NULL, NULL);
+ devl_unlock(vport->dev->shd);
}
int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
@@ -1878,13 +1784,8 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
int err;
err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
- if (!err) {
- struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
-
- esw_qos_lock(esw);
+ if (!err)
esw_vport_qos_prune_empty(vport);
- esw_qos_unlock(esw);
- }
return err;
}
@@ -2007,18 +1908,15 @@ static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *curr_parent;
- struct mlx5_eswitch *esw = node->esw;
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
int err;
- esw_qos_lock(esw);
- curr_parent = node->parent;
if (!parent)
- parent = esw->qos.root;
+ parent = node->esw->qos.root;
err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
if (err)
- goto out;
+ return err;
if (node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
err = esw_qos_tc_arbiter_node_update_parent(node, parent,
@@ -2028,15 +1926,11 @@ static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
}
if (err)
- goto out;
+ return err;
esw_qos_normalize_min_rate(curr_parent, extack);
esw_qos_normalize_min_rate(parent, extack);
-
-out:
- esw_qos_unlock(esw);
-
- return err;
+ return 0;
}
int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 0a50982b0e27..f275e850d2c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -6,9 +6,6 @@
#ifdef CONFIG_MLX5_ESWITCH
-int mlx5_esw_qos_init(struct mlx5_eswitch *esw);
-void mlx5_esw_qos_cleanup(struct mlx5_eswitch *esw);
-
int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *evport, u32 max_rate, u32 min_rate);
bool mlx5_esw_qos_get_vport_rate(struct mlx5_vport *vport, u32 *max_rate, u32 *min_rate);
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 123c96716a54..f6bbc92d2817 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1647,10 +1647,6 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
mlx5_eq_notifier_register(esw->dev, &esw->nb);
- err = mlx5_esw_qos_init(esw);
- if (err)
- goto err_esw_init;
-
if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
@@ -2057,9 +2053,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto reps_err;
esw->mode = MLX5_ESWITCH_LEGACY;
- err = mlx5_esw_qos_init(esw);
- if (err)
- goto reps_err;
mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
@@ -2109,7 +2102,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
- mlx5_esw_qos_cleanup(esw);
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
mutex_destroy(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 9b3949a64784..c8865ea3858d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -224,8 +224,9 @@ struct mlx5_vport {
struct mlx5_vport_info info;
- /* Protected with the E-Switch qos domain lock. The Vport QoS can
- * either be disabled (sched_node is NULL) or in one of three states:
+ /* Protected by mlx5_shd_lock().
+ * The Vport QoS can either be disabled (sched_node is NULL) or in one
+ * of three states:
* 1. Regular QoS (sched_node is a vport node).
* 2. TC QoS enabled on the vport (sched_node is a TC arbiter).
* 3. TC QoS enabled on the vport's parent node
@@ -359,7 +360,6 @@ enum {
};
struct dentry;
-struct mlx5_qos_domain;
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
@@ -386,11 +386,10 @@ struct mlx5_eswitch {
struct rw_semaphore mode_lock;
atomic64_t user_count;
- /* Protected with the E-Switch qos domain lock. */
+ /* QoS changes are serialized with mlx5_shd_lock(). */
struct {
/* Initially 0, meaning no QoS users and QoS is disabled. */
refcount_t refcnt;
- struct mlx5_qos_domain *domain;
/* The root node of the hierarchy. */
struct mlx5_esw_sched_node *root;
} qos;
--
2.44.0
This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.
---
net/mlx5: qos: Remove qos domains and use shd lock
This commit removes QoS domains and switches to using the shd lock to
protect against concurrent QoS modifications. It also enables the
supported_cross_device_rate_nodes devlink ops attribute.
Link: https://lore.kernel.org/netdev/20250213180134.323929-1-tariqt@nvidia.com/
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> index 6698ac55a4bfa..c051605fecd23 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> @@ -385,6 +385,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
> .rate_node_del = mlx5_esw_devlink_rate_node_del,
> .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
> .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
> + .supported_cross_device_rate_nodes = true,
The commit message states that enabling supported_cross_device_rate_nodes
now is safe because mlx5_esw_qos_vport_update_parent rejects cross-esw
parent updates.
When a user provides a DEVLINK_ATTR_PARENT_DEV belonging to an unrelated
device, the devlink core (devlink_nl_rate_parent_node_set) drops the lock on
the parent devlink and calls devlink_rate_node_get_by_name(). This function
traverses the rate_list of the unlocked parent devlink instance.
Does this leave the devlink core exposed to data races and potential
use-after-free vulnerabilities, since the driver's rejection callback
executes only after this lockless list traversal has occurred?
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> index 0be516003bcde..f67f99428959f 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> @@ -1110,7 +1068,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
> struct mlx5_esw_sched_node *parent;
>
> lockdep_assert_held(&esw->state_lock);
> - esw_qos_lock(esw);
> + devl_lock(esw->dev->shd);
Could esw->dev->shd be NULL here?
In mlx5_shd_init(), initialization is explicitly skipped for non-PF devices,
meaning Sub-Functions (SFs) acting as E-Switch managers will have a NULL
shd pointer. If the device lacks VPD data, pci_vpd_alloc() returns -ENODEV,
causing mlx5_shd_init() to return 0 but leaving dev->shd as NULL.
When QoS operations are attempted on these devices, calling devl_lock()
dereferences the pointer, which could cause an immediate NULL pointer
dereference and kernel panic.
Additionally, this acquires the devlink lock while already holding
esw->state_lock, establishing the lock order:
state_lock -> devlink->lock.
Conversely, the devlink core invokes driver callbacks with the devlink
instance lock held, and the driver subsequently acquires esw->state_lock
inside these callbacks, creating the reverse order:
devlink->lock -> state_lock.
Does this introduce a circular locking dependency splat between state_lock
and the devlink instance lock?
> @@ -1159,26 +1117,25 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_vport *vport, u32 max_rate, u32 min_
> struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
> int err;
>
> - esw_qos_lock(esw);
> + devl_lock(esw->dev->shd);
This function is invoked by mlx5_eswitch_set_vport_rate(), which acts as the
handler for the ndo_set_vf_rate netdev callback. The core network stack
executes ndo_set_vf_rate with the rtnl_lock held, establishing the lock
order: rtnl_lock -> devlink->lock.
However, the devlink subsystem enforces the opposite locking order
(devlink->lock -> rtnl_lock). Because all devlink locks share the same
lockdep class, does this inversion trigger a circular locking dependency
lockdep splat and pose an ABBA deadlock risk?
[ ... ]
> @@ -1859,13 +1763,15 @@ mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport,
> parent ? : esw->qos.root,
> extack);
> }
> - esw_qos_unlock(esw);
> +
> return err;
> }
>
> void mlx5_esw_qos_vport_clear_parent(struct mlx5_vport *vport)
> {
> + devl_lock(vport->dev->shd);
> mlx5_esw_qos_vport_update_parent(vport, NULL, NULL);
> + devl_unlock(vport->dev->shd);
> }
During device teardown, execution paths such as mlx5_unload_one_devl_locked()
hold the primary devlink instance lock. This path invokes
mlx5_eswitch_unload_vf_vports() -> mlx5_esw_offloads_devlink_port_unregister()
-> mlx5_esw_qos_vport_clear_parent().
Because both the primary devlink instance lock and the shd devlink instance
lock are initialized from the same site (devlink_alloc), they share the same
lockdep class.
Does calling devl_lock() directly (instead of using the devlink core's
mutex_lock_nested capability) while a lock of the same class is already held
trigger a recursive locking lockdep warning?
On Mon, 2026-03-30 at 19:08 -0700, Jakub Kicinski wrote:
> This is an AI-generated review of your patch. The human sending this
> email has considered the AI review valid, or at least plausible.
> ---
> net/mlx5: qos: Remove qos domains and use shd lock
>
> This commit removes QoS domains and switches to using the shd lock to
> protect against concurrent QoS modifications. It also enables the
> supported_cross_device_rate_nodes devlink ops attribute.
>
> Link:
> https://lore.kernel.org/netdev/20250213180134.323929-1-tariqt@nvidia.com/
>
> > diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > index 6698ac55a4bfa..c051605fecd23 100644
> > --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > @@ -385,6 +385,7 @@ static const struct devlink_ops
> > mlx5_devlink_ops = {
> > .rate_node_del = mlx5_esw_devlink_rate_node_del,
> > .rate_leaf_parent_set =
> > mlx5_esw_devlink_rate_leaf_parent_set,
> > .rate_node_parent_set =
> > mlx5_esw_devlink_rate_node_parent_set,
> > + .supported_cross_device_rate_nodes = true,
>
> The commit message states that enabling
> supported_cross_device_rate_nodes
> now is safe because mlx5_esw_qos_vport_update_parent rejects cross-
> esw
> parent updates.
>
> When a user provides a DEVLINK_ATTR_PARENT_DEV belonging to an
> unrelated
> device, the devlink core (devlink_nl_rate_parent_node_set) drops the
> lock on
> the parent devlink and calls devlink_rate_node_get_by_name(). This
> function
> traverses the rate_list of the unlocked parent devlink instance.
>
> Does this leave the devlink core exposed to data races and potential
> use-after-free vulnerabilities, since the driver's rejection callback
> executes only after this lockless list traversal has occurred?
I will address this by adding common ancestor validation in devlink
rate core, which was missing.
>
> > diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > index 0be516003bcde..f67f99428959f 100644
> > --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > @@ -1110,7 +1068,7 @@ void mlx5_esw_qos_vport_disable(struct
> > mlx5_vport *vport)
> > struct mlx5_esw_sched_node *parent;
> >
> > lockdep_assert_held(&esw->state_lock);
> > - esw_qos_lock(esw);
> > + devl_lock(esw->dev->shd);
>
> Could esw->dev->shd be NULL here?
>
> In mlx5_shd_init(), initialization is explicitly skipped for non-PF
> devices,
> meaning Sub-Functions (SFs) acting as E-Switch managers will have a
> NULL
> shd pointer. If the device lacks VPD data, pci_vpd_alloc() returns -
> ENODEV,
> causing mlx5_shd_init() to return 0 but leaving dev->shd as NULL.
>
> When QoS operations are attempted on these devices, calling
> devl_lock()
> dereferences the pointer, which could cause an immediate NULL pointer
> dereference and kernel panic.
Right. This is a race with Jiri's fix ([1]), which changed the
assumption this code was architected with that dev->shd is always
initialized. That is no longer the case, and the consequence is that
this needs to 1) not unconditionally use dev->shd and 2) use a fallback
lock when it isn't available.
>
> Additionally, this acquires the devlink lock while already holding
> esw->state_lock, establishing the lock order:
> state_lock -> devlink->lock.
>
> Conversely, the devlink core invokes driver callbacks with the
> devlink
> instance lock held, and the driver subsequently acquires esw-
> >state_lock
> inside these callbacks, creating the reverse order:
> devlink->lock -> state_lock.
>
> Does this introduce a circular locking dependency splat between
> state_lock
> and the devlink instance lock?
This claim is based on the incorrect assumption that devlink instances
share the same lock class, while in fact each is unique. The
established lock order is devlink->lock -> state_lock and this doesn't
conflict with state_lock -> shd->lock.
>
> > @@ -1159,26 +1117,25 @@ int mlx5_esw_qos_set_vport_rate(struct
> > mlx5_vport *vport, u32 max_rate, u32 min_
> > struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
> > int err;
> >
> > - esw_qos_lock(esw);
> > + devl_lock(esw->dev->shd);
>
> This function is invoked by mlx5_eswitch_set_vport_rate(), which acts
> as the
> handler for the ndo_set_vf_rate netdev callback. The core network
> stack
> executes ndo_set_vf_rate with the rtnl_lock held, establishing the
> lock
> order: rtnl_lock -> devlink->lock.
>
> However, the devlink subsystem enforces the opposite locking order
> (devlink->lock -> rtnl_lock). Because all devlink locks share the
> same
> lockdep class, does this inversion trigger a circular locking
> dependency
> lockdep splat and pose an ABBA deadlock risk?
Same as above, there's no ABBA here since devlink instances use
different lock classes.
Perhaps Sashiko (I think) could be taught this to reduce false
positives?
>
> [ ... ]
>
> > @@ -1859,13 +1763,15 @@ mlx5_esw_qos_vport_update_parent(struct
> > mlx5_vport *vport,
> > parent ? : esw-
> > >qos.root,
> > extack);
> > }
> > - esw_qos_unlock(esw);
> > +
> > return err;
> > }
> >
> > void mlx5_esw_qos_vport_clear_parent(struct mlx5_vport *vport)
> > {
> > + devl_lock(vport->dev->shd);
> > mlx5_esw_qos_vport_update_parent(vport, NULL, NULL);
> > + devl_unlock(vport->dev->shd);
> > }
>
> During device teardown, execution paths such as
> mlx5_unload_one_devl_locked()
> hold the primary devlink instance lock. This path invokes
> mlx5_eswitch_unload_vf_vports() ->
> mlx5_esw_offloads_devlink_port_unregister()
> -> mlx5_esw_qos_vport_clear_parent().
>
> Because both the primary devlink instance lock and the shd devlink
> instance
> lock are initialized from the same site (devlink_alloc), they share
> the same
> lockdep class.
>
> Does calling devl_lock() directly (instead of using the devlink
> core's
> mutex_lock_nested capability) while a lock of the same class is
> already held
> trigger a recursive locking lockdep warning?
Again, same false positive. Devlinks use unique lock classes.
Cosmin.
On Tue, 2026-03-31 at 12:53 +0000, Cosmin Ratiu wrote:
> On Mon, 2026-03-30 at 19:08 -0700, Jakub Kicinski wrote:
> > This is an AI-generated review of your patch. The human sending
> > this
> > email has considered the AI review valid, or at least plausible.
> > ---
> > net/mlx5: qos: Remove qos domains and use shd lock
> >
> > This commit removes QoS domains and switches to using the shd lock
> > to
> > protect against concurrent QoS modifications. It also enables the
> > supported_cross_device_rate_nodes devlink ops attribute.
> >
> > Link:
> > https://lore.kernel.org/netdev/20250213180134.323929-1-tariqt@nvidia.com/
> >
> > > diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > > b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > > index 6698ac55a4bfa..c051605fecd23 100644
> > > --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > > +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
> > > @@ -385,6 +385,7 @@ static const struct devlink_ops
> > > mlx5_devlink_ops = {
> > > .rate_node_del = mlx5_esw_devlink_rate_node_del,
> > > .rate_leaf_parent_set =
> > > mlx5_esw_devlink_rate_leaf_parent_set,
> > > .rate_node_parent_set =
> > > mlx5_esw_devlink_rate_node_parent_set,
> > > + .supported_cross_device_rate_nodes = true,
> >
> > The commit message states that enabling
> > supported_cross_device_rate_nodes
> > now is safe because mlx5_esw_qos_vport_update_parent rejects cross-
> > esw
> > parent updates.
> >
> > When a user provides a DEVLINK_ATTR_PARENT_DEV belonging to an
> > unrelated
> > device, the devlink core (devlink_nl_rate_parent_node_set) drops
> > the
> > lock on
> > the parent devlink and calls devlink_rate_node_get_by_name(). This
> > function
> > traverses the rate_list of the unlocked parent devlink instance.
> >
> > Does this leave the devlink core exposed to data races and
> > potential
> > use-after-free vulnerabilities, since the driver's rejection
> > callback
> > executes only after this lockless list traversal has occurred?
>
> I will address this by adding common ancestor validation in devlink
> rate core, which was missing.
>
> >
> > > diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > > b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > > index 0be516003bcde..f67f99428959f 100644
> > > --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > > +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
> > > @@ -1110,7 +1068,7 @@ void mlx5_esw_qos_vport_disable(struct
> > > mlx5_vport *vport)
> > > struct mlx5_esw_sched_node *parent;
> > >
> > > lockdep_assert_held(&esw->state_lock);
> > > - esw_qos_lock(esw);
> > > + devl_lock(esw->dev->shd);
> >
> > Could esw->dev->shd be NULL here?
> >
> > In mlx5_shd_init(), initialization is explicitly skipped for non-PF
> > devices,
> > meaning Sub-Functions (SFs) acting as E-Switch managers will have a
> > NULL
> > shd pointer. If the device lacks VPD data, pci_vpd_alloc() returns
> > -
> > ENODEV,
> > causing mlx5_shd_init() to return 0 but leaving dev->shd as NULL.
> >
> > When QoS operations are attempted on these devices, calling
> > devl_lock()
> > dereferences the pointer, which could cause an immediate NULL
> > pointer
> > dereference and kernel panic.
>
> Right. This is a race with Jiri's fix ([1]), which changed the
> assumption this code was architected with that dev->shd is always
> initialized. That is no longer the case, and the consequence is that
> this needs to 1) not unconditionally use dev->shd and 2) use a
> fallback
> lock when it isn't available.
Forgot:
[1]
https://lore.kernel.org/netdev/20260325152801.236343-1-jiri@resnulli.us/
© 2016 - 2026 Red Hat, Inc.