From: Shay Drory <shayd@nvidia.com>
When NR_CPUS is set to 8192 or higher, allocating struct irq_affinity_desc
on the stack triggers compiler warnings about the frame size [1], since the
struct cpumask it embeds grows with NR_CPUS.

Address this by moving these allocations to the heap.
[1]
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c: In function ‘irq_pool_request_irq’:
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c:70:1: warning:
the frame size of 1048 bytes is larger than 1024 bytes
[-Wframe-larger-than=]
70 | }
| ^
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c: In function ‘mlx5_ctrl_irq_request’:
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c:478:1: warning: the
frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]
478 | }
| ^
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c: In function ‘mlx5_irq_request_vector’:
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c:597:1: warning: the
frame size of 1040 bytes is larger than 1024 bytes [-Wframe-larger-than=]
597 | }
| ^
drivers/net/ethernet/mellanox/mlx5/core/eq.c: In function ‘comp_irq_request_sf’:
drivers/net/ethernet/mellanox/mlx5/core/eq.c:925:1: warning: the frame
size of 1064 bytes is larger than 1024 bytes [-Wframe-larger-than=]
925 | }
| ^
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c: In function ‘irq_pool_request_irq’:
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c:74:1: warning:
the frame size of 1048 bytes is larger than 1024 bytes
[-Wframe-larger-than=]
74 | }
| ^
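
For scale: with NR_CPUS=8192, struct cpumask is a bitmap of 8192 bits, i.e.
1024 bytes, so a single on-stack struct irq_affinity_desc by itself reaches
the 1024-byte -Wframe-larger-than= limit seen above. The change follows the
usual stack-to-heap pattern; a minimal sketch of its shape (do_request() and
the function names here are illustrative, not the actual driver code):

	/* Before: a ~1 KiB descriptor lands on the kernel stack. */
	static int request_irq_on_stack(void)
	{
		struct irq_affinity_desc af_desc = {};	/* sizeof >= NR_CPUS / 8 bytes */

		cpumask_copy(&af_desc.mask, cpu_online_mask);
		return do_request(&af_desc);
	}

	/* After: the descriptor lives on the heap; the stack holds one pointer. */
	static int request_irq_on_heap(void)
	{
		struct irq_affinity_desc *af_desc;
		int err;

		af_desc = kzalloc(sizeof(*af_desc), GFP_KERNEL);
		if (!af_desc)
			return -ENOMEM;

		cpumask_copy(&af_desc->mask, cpu_online_mask);
		err = do_request(af_desc);
		kfree(af_desc);	/* freed once used, as in the patch below */
		return err;
	}

For a bare cpumask, alloc_cpumask_var()/free_cpumask_var() (heap-backed when
CONFIG_CPUMASK_OFFSTACK is set) would be the canonical alternative, but it
does not apply directly here because the mask is embedded in
struct irq_affinity_desc.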
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reported-by: Arnd Bergmann <arnd@kernel.org>
Closes: https://lore.kernel.org/all/20250620111010.3364606-1-arnd@kernel.org
Reviewed-by: Maher Sanalla <msanalla@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
drivers/net/ethernet/mellanox/mlx5/core/eq.c | 19 +++++++---
.../mellanox/mlx5/core/irq_affinity.c | 21 ++++++++---
.../net/ethernet/mellanox/mlx5/core/pci_irq.c | 37 +++++++++++++------
3 files changed, 53 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 66dce17219a6..779efc186255 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -876,19 +876,25 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct irq_affinity_desc af_desc = {};
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
/* In case SF irq pool does not exist, fallback to the PF irqs*/
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = false;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
- if (IS_ERR(irq))
+ af_desc = kzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return -ENOMEM;
+
+ af_desc->is_managed = false;
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ cpumask_andnot(&af_desc->mask, &af_desc->mask, &table->used_cpus);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
+ if (IS_ERR(irq)) {
+ kfree(af_desc);
return PTR_ERR(irq);
+ }
cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
@@ -896,6 +902,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
+ kfree(af_desc);
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 2691d88cdee1..d0a845579d33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -47,29 +47,38 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
- struct irq_affinity_desc auto_desc = {};
+ struct irq_affinity_desc *auto_desc;
struct mlx5_irq *irq;
u32 irq_index;
int err;
+ auto_desc = kzalloc(sizeof(*auto_desc), GFP_KERNEL);
+ if (!auto_desc)
+ return ERR_PTR(-ENOMEM);
+
err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
- if (err)
- return ERR_PTR(err);
+ if (err) {
+ irq = ERR_PTR(err);
+ goto out;
+ }
if (pool->irqs_per_cpu) {
if (cpumask_weight(&af_desc->mask) > 1)
/* if req_mask contain more then one CPU, set the least loadad CPU
* of req_mask
*/
cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
- &auto_desc.mask);
+ &auto_desc->mask);
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
irq = mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
- NULL);
+ cpumask_empty(&auto_desc->mask) ?
+ af_desc : auto_desc, NULL);
if (IS_ERR(irq))
xa_erase(&pool->irqs, irq_index);
+
+out:
+ kfree(auto_desc);
return irq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 40024cfa3099..ac00aa29e61a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -470,26 +470,32 @@ void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
{
struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
- struct irq_affinity_desc af_desc;
+ struct irq_affinity_desc *af_desc;
struct mlx5_irq *irq;
- cpumask_copy(&af_desc.mask, cpu_online_mask);
- af_desc.is_managed = false;
+ af_desc = kzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
+
+ cpumask_copy(&af_desc->mask, cpu_online_mask);
+ af_desc->is_managed = false;
if (!mlx5_irq_pool_is_sf_pool(pool)) {
/* In case we are allocating a control IRQ from a pci device's pool.
* This can happen also for a SF if the SFs pool is empty.
*/
if (!pool->xa_num_irqs.max) {
- cpumask_clear(&af_desc.mask);
+ cpumask_clear(&af_desc->mask);
/* In case we only have a single IRQ for PF/VF */
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask),
+ &af_desc->mask);
}
/* Allocate the IRQ in index 0. The vector was already allocated */
- irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
+ irq = irq_pool_request_vector(pool, 0, af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, af_desc);
}
+ kfree(af_desc);
return irq;
}
@@ -548,16 +554,23 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = table->pcif_pool;
- struct irq_affinity_desc af_desc;
int offset = MLX5_IRQ_VEC_COMP_BASE;
+ struct irq_affinity_desc *af_desc;
+ struct mlx5_irq *irq;
+
+ af_desc = kzalloc(sizeof(*af_desc), GFP_KERNEL);
+ if (!af_desc)
+ return ERR_PTR(-ENOMEM);
if (!pool->xa_num_irqs.max)
offset = 0;
- af_desc.is_managed = false;
- cpumask_clear(&af_desc.mask);
- cpumask_set_cpu(cpu, &af_desc.mask);
- return mlx5_irq_request(dev, vecidx + offset, &af_desc, rmap);
+ af_desc->is_managed = false;
+ cpumask_clear(&af_desc->mask);
+ cpumask_set_cpu(cpu, &af_desc->mask);
+ irq = mlx5_irq_request(dev, vecidx + offset, af_desc, rmap);
+ kfree(af_desc);
+ return irq;
}
static struct mlx5_irq_pool *
--
2.31.1
On 2025/7/17 10:03, Tariq Toukan wrote:
> [...]

https://patchwork.kernel.org/project/linux-rdma/patch/20250711030359.4419-1-yanjun.zhu@linux.dev/

This commit appears to be the same as the one above. Does the issue
addressed in this commit still occur after the previous one is applied?

Thanks,
Yanjun.Zhu