From: Moshe Shemesh <moshe@nvidia.com>
Ensure that flow counters allocated with mlx5_fc_single_alloc() have
their bulk structure correctly initialized so they can safely be used in HWS rules.
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/fs_core.h | 3 +-
.../ethernet/mellanox/mlx5/core/fs_counters.c | 39 +++++++++++++------
2 files changed, 29 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 1c6591425260..dbaf33b537f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -308,7 +308,8 @@ struct mlx5_flow_root_namespace {
};
enum mlx5_fc_type {
- MLX5_FC_TYPE_ACQUIRED = 0,
+ MLX5_FC_TYPE_POOL_ACQUIRED = 0,
+ MLX5_FC_TYPE_SINGLE,
MLX5_FC_TYPE_LOCAL,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 14539a20a60f..fe7caa910219 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -153,6 +153,7 @@ static void mlx5_fc_stats_query_all_counters(struct mlx5_core_dev *dev)
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter->bulk);
kfree(counter);
}
@@ -163,7 +164,7 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
if (WARN_ON(counter->type == MLX5_FC_TYPE_LOCAL))
return;
- if (counter->bulk)
+ if (counter->type == MLX5_FC_TYPE_POOL_ACQUIRED)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
mlx5_fc_free(dev, counter);
@@ -220,8 +221,16 @@ static void mlx5_fc_stats_work(struct work_struct *work)
mlx5_fc_stats_query_all_counters(dev);
}
+static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
+{
+ fc_bulk->base_id = base_id;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
+}
+
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
+ struct mlx5_fc_bulk *fc_bulk;
struct mlx5_fc *counter;
int err;
@@ -229,13 +238,26 @@ static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
if (!counter)
return ERR_PTR(-ENOMEM);
- err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err) {
- kfree(counter);
- return ERR_PTR(err);
+ fc_bulk = kzalloc(sizeof(*fc_bulk), GFP_KERNEL);
+ if (!fc_bulk) {
+ err = -ENOMEM;
+ goto free_counter;
}
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto free_bulk;
+ counter->type = MLX5_FC_TYPE_SINGLE;
+ mlx5_fs_bulk_init(&fc_bulk->fs_bulk, 1);
+ mlx5_fc_bulk_init(fc_bulk, counter->id);
+ counter->bulk = fc_bulk;
return counter;
+
+free_bulk:
+ kfree(fc_bulk);
+free_counter:
+ kfree(counter);
+ return ERR_PTR(err);
}
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
@@ -421,13 +443,6 @@ static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
counter->id = id;
}
-static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
-{
- fc_bulk->base_id = base_id;
- refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
- mutex_init(&fc_bulk->hws_data.lock);
-}
-
u32 mlx5_fc_get_base_id(struct mlx5_fc *counter)
{
return counter->bulk->base_id;
--
2.31.1
On 1/12/26 10:40 AM, Tariq Toukan wrote:
> @@ -220,8 +221,16 @@ static void mlx5_fc_stats_work(struct work_struct *work)
> mlx5_fc_stats_query_all_counters(dev);
> }
>
> +static void mlx5_fc_bulk_init(struct mlx5_fc_bulk *fc_bulk, u32 base_id)
> +{
> + fc_bulk->base_id = base_id;
> + refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
> + mutex_init(&fc_bulk->hws_data.lock);
> +}
Not worth a repost, but you could have avoided moving this function
by placing it here in patch 1/3.
/P
© 2016 - 2026 Red Hat, Inc.