When the number of TX queues is reduced (e.g., via ethtool -L), the
Qdisc layer retains previously enqueued skbs with queue mappings from
before the reduction. After the reset completes and tx_queues_active is
set to true, netif_tx_start_all_queues() drains these stale skbs through
ibmvnic_xmit(). The queue index from skb_get_queue_mapping() may exceed
the newly allocated array bounds, causing out-of-bounds reads on
tx_scrq[] and tx_pool[]/tso_pool[], and out-of-bounds writes on
tx_stats_buffers[] in the function's exit path.
The existing tx_queues_active guard does not help here: it is set to
true by __ibmvnic_open() before netif_tx_start_all_queues() restarts
queue draining, so stale skbs pass the check with an invalid queue index.
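The problematic ordering in __ibmvnic_open(), sketched (simplified, not
the verbatim driver code):

	adapter->tx_queues_active = true;	/* xmit re-enabled first */
	...
	netif_tx_start_all_queues(netdev);	/* stale skbs now drain */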
Add a bounds check against num_active_tx_scrqs immediately after the
tx_queues_active guard. Use a dedicated out_unlock label to skip the
per-queue stats updates (which also index tx_stats_buffers[queue_num])
when the queue index is invalid.
Fixes: 4219196d1f66 ("ibmvnic: fix race between xmit and reset")
Reported-by: Yuhao Jiang <danisjiang@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
---
drivers/net/ethernet/ibm/ibmvnic.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5a510eed335e..c939391474cb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2453,6 +2453,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		goto out;
 	}
 
+	if (unlikely(queue_num >= adapter->num_active_tx_scrqs)) {
+		dev_kfree_skb_any(skb);
+		goto out_unlock;
+	}
+
 	tx_scrq = adapter->tx_scrq[queue_num];
 	txq = netdev_get_tx_queue(netdev, queue_num);
 	ind_bufp = &tx_scrq->ind_buf;
@@ -2672,6 +2677,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
 
+	return ret;
+out_unlock:
+	rcu_read_unlock();
 	return ret;
 }
--
2.43.0
On Fri, Mar 20, 2026 at 10:54:39PM -0500, Tyllis Xu wrote:
> When the number of TX queues is reduced (e.g., via ethtool -L), the
> Qdisc layer retains previously enqueued skbs with queue mappings from
> before the reduction. After the reset completes and tx_queues_active is
> set to true, netif_tx_start_all_queues() drains these stale skbs through
> ibmvnic_xmit(). The queue index from skb_get_queue_mapping() may exceed
> the newly allocated array bounds, causing out-of-bounds reads on
> tx_scrq[] and tx_pool[]/tso_pool[], and out-of-bounds writes on
> tx_stats_buffers[] in the function's exit path.
>
> The existing tx_queues_active guard does not help here: it is set to
> true by __ibmvnic_open() before netif_tx_start_all_queues() restarts
> queue draining, so stale skbs pass the check with an invalid queue index.
>
> Add a bounds check against num_active_tx_scrqs immediately after the
> tx_queues_active guard. Use a dedicated out_unlock label to skip the
> per-queue stats updates (which also index tx_stats_buffers[queue_num])
> when the queue index is invalid.
>
> Fixes: 4219196d1f66 ("ibmvnic: fix race between xmit and reset")
> Reported-by: Yuhao Jiang <danisjiang@gmail.com>
> Cc: stable@vger.kernel.org
> Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
> ---
> drivers/net/ethernet/ibm/ibmvnic.c | 8 ++++++++
> 1 file changed, 8 insertions(+)
>
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
> index 5a510eed335e..c939391474cb 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.c
> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
> @@ -2453,6 +2453,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
> goto out;
> }
>
> + if (unlikely(queue_num >= adapter->num_active_tx_scrqs)) {
> + dev_kfree_skb_any(skb);
> + goto out_unlock;
> + }
> +
This doesn't seem quite right. Shouldn't it be handled as per the other
blocks in this function that drop packets? In which case it could
re-use the existing handling in the conditional immediately above
this hunk.

Also, I don't think unlikely() is in keeping with the existing
implementation of this function.
I'm suggesting something like (completely untested):
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5a510eed335e..67e1e62631e3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2457,7 +2457,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	txq = netdev_get_tx_queue(netdev, queue_num);
 	ind_bufp = &tx_scrq->ind_buf;
 
-	if (ibmvnic_xmit_workarounds(skb, netdev)) {
+	if (ibmvnic_xmit_workarounds(skb, netdev) ||
+	    queue_num >= adapter->num_active_tx_scrqs) {
 		tx_dropped++;
 		tx_send_failed++;
 		ret = NETDEV_TX_OK;
Where the next line is:
goto out;
...
> @@ -2672,6 +2677,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
> adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
> adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
>
> + return ret;
> +out_unlock:
> + rcu_read_unlock();
> return ret;
> }
My previous comment notwithstanding:

The RCU read-side critical section is already enormous,
so perhaps making it slightly smaller doesn't make a difference.

If so, can we go with this slightly different flow here (completely untested):
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5a510eed335e..1e1cd8c11cf9 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2664,14 +2664,14 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netif_carrier_off(netdev);
 	}
 out:
-	rcu_read_unlock();
 	adapter->tx_send_failed += tx_send_failed;
 	adapter->tx_map_failed += tx_map_failed;
 	adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
 	adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
-
+out_unlock:
+	rcu_read_unlock();
 	return ret;
 }
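For anyone following along, the reset side that this RCU usage pairs
with looks roughly like the following (reconstructed from the Fixes
commit, so treat names and placement as approximate):

	/* Reset path (sketch): make new xmits bail out, then wait for
	 * any in-flight ibmvnic_xmit() to leave its RCU read-side
	 * critical section before tx_scrq[]/tx_pool[] are torn down.
	 */
	adapter->tx_queues_active = false;
	synchronize_rcu();
	/* now safe to free or resize the TX queue arrays */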
--
pw-bot: changes-requested
I'll try out the suggested changes and use more of the existing
handling to create a new patch. I'll also remove the unlikely().
Thank you for your feedback!
When the number of TX queues is reduced (e.g., via ethtool -L), the
Qdisc layer retains previously enqueued skbs with queue mappings from
before the reduction. After the reset completes and tx_queues_active is
set to true, netif_tx_start_all_queues() drains these stale skbs through
ibmvnic_xmit(). The queue index from skb_get_queue_mapping() may exceed
the newly allocated array bounds, causing out-of-bounds reads on
tx_scrq[] and tx_pool[]/tso_pool[].
The existing tx_queues_active guard does not help here: it is set to
true by __ibmvnic_open() before netif_tx_start_all_queues() restarts
queue draining, so stale skbs pass the check with an invalid queue index.
Fold a bounds check against num_active_tx_scrqs into the tx_queues_active
guard, reusing the same drop-packet handling. Since tx_stats_buffers[] is
allocated for IBMVNIC_MAX_QUEUES entries (not just num_active_tx_scrqs),
all drop paths can safely fall through to the out: label's stats update.
Also move rcu_read_unlock() to after the per-queue stats updates, as the
RCU critical section is already large and releasing it a few instructions
earlier provides no practical benefit.
Fixes: 4219196d1f66 ("ibmvnic: fix race between xmit and reset")
Reported-by: Yuhao Jiang <danisjiang@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
---
v2: Fold the bounds check into the existing !tx_queues_active guard rather
than adding a separate if block with unlikely(), reusing the same
drop-packet handling (dev_kfree_skb_any + tx_send_failed/tx_dropped
increments + goto out). Remove the dedicated out_unlock: label;
tx_stats_buffers[] is allocated for IBMVNIC_MAX_QUEUES entries so all
drop paths can safely fall through to the out: stats update. Move
rcu_read_unlock() to after the stats updates per maintainer suggestion.
(Simon Horman)
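For reference, the allocation that makes the fall-through safe is done
once at init time for the maximum queue count, along these lines
(paraphrased from the driver's stats-buffer init; function and struct
names from memory, so treat this as a sketch rather than the exact
tree contents):

	/* Sized for IBMVNIC_MAX_QUEUES, so indexing with any queue_num
	 * below the device's maximum stays in bounds even after a
	 * queue-count reduction.
	 */
	adapter->tx_stats_buffers = kcalloc(IBMVNIC_MAX_QUEUES,
					    sizeof(struct ibmvnic_tx_queue_stats),
					    GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;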
drivers/net/ethernet/ibm/ibmvnic.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5a510eed335e..d5c611c3d9ec 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2444,14 +2444,15 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	 * rcu to ensure reset waits for us to complete.
 	 */
 	rcu_read_lock();
-	if (!adapter->tx_queues_active) {
+	if (!adapter->tx_queues_active ||
+	    queue_num >= adapter->num_active_tx_scrqs) {
 		dev_kfree_skb_any(skb);
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
 		goto out;
 	}
 
 	tx_scrq = adapter->tx_scrq[queue_num];
 	txq = netdev_get_tx_queue(netdev, queue_num);
@@ -2663,14 +2664,13 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		netif_tx_stop_all_queues(netdev);
 		netif_carrier_off(netdev);
 	}
 out:
-	rcu_read_unlock();
 	adapter->tx_send_failed += tx_send_failed;
 	adapter->tx_map_failed += tx_map_failed;
 	adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets;
 	adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets;
 	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
 	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
-
+	rcu_read_unlock();
 	return ret;
 }
--
2.43.0
On Wed, 1 Apr 2026 00:08:45 -0500 Tyllis Xu wrote:
> When the number of TX queues is reduced (e.g., via ethtool -L), the
> Qdisc layer retains previously enqueued skbs with queue mappings from
> before the reduction. After the reset completes and tx_queues_active is
> set to true, netif_tx_start_all_queues() drains these stale skbs through
> ibmvnic_xmit(). The queue index from skb_get_queue_mapping() may exceed
> the newly allocated array bounds, causing out-of-bounds reads on
> tx_scrq[] and tx_pool[]/tso_pool[].

This should not happen if the interface configures itself correctly, see
https://lore.kernel.org/all/20260106182244.7188a8f6@kernel.org/

Please share a repro if you have one.
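i.e. the reset path is expected to shrink the visible queue count
before traffic restarts, something like (untested, field name assumed):

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc)
		return rc;

With real_num_tx_queues kept in sync, the core clamps any stale queue
mapping (see netdev_cap_txqueue()) before the skb reaches the driver,
so ibmvnic_xmit() should never see an out-of-range index.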