drivers/net/ethernet/natsemi/ns83820.c | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+)
The ns83820 driver currently ignores the return values of dma_map_single()
and skb_frag_dma_map() in the transmit path. If DMA mapping fails due to
IOMMU exhaustion or SWIOTLB pressure, the driver may proceed with invalid
DMA addresses, potentially causing hardware errors, data corruption, or
system instability.
Additionally, if mapping fails midway through processing fragmented
packets, previously mapped DMA resources are not released, leading
to DMA resource leaks.
Fix this by:
1. Checking dma_mapping_error() after each DMA mapping call.
2. Implementing an error handling path to unmap successfully
mapped buffers (both linear and fragments) using
dma_unmap_single().
3. Freeing the skb using dev_kfree_skb_any() to safely handle
both process and softirq contexts, as hard_start_xmit may be
called with IRQs enabled.
4. Returning NETDEV_TX_OK to drop the packet gracefully and
prevent TX queue stagnation.
This ensures compliance with the DMA API guidelines and
improves driver stability under memory pressure.
Fixes: fd9e4d6fec15 ("natsemi: switch from 'pci_' to 'dma_' API")
Cc: stable@vger.kernel.org
Signed-off-by: Wang Jun <1742789905@qq.com>
---
drivers/net/ethernet/natsemi/ns83820.c | 32 ++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index cdbf82affa7b..90465e4977c3 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1051,6 +1051,12 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
int stopped = 0;
int do_intr = 0;
volatile __le32 *first_desc;
+ dma_addr_t frag_dma_addr[MAX_SKB_FRAGS];
+ unsigned int frag_dma_len[MAX_SKB_FRAGS];
+ int frag_mapped_count = 0;
+ dma_addr_t main_buf = 0;
+ unsigned int main_len = 0;
+ int i;
dprintk("ns83820_hard_start_xmit\n");
@@ -1121,6 +1127,13 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, buf)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ main_buf = buf;
+ main_len = len;
+
first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
for (;;) {
@@ -1144,6 +1157,15 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, buf))
+ goto dma_map_error;
+
+ if (frag_mapped_count < MAX_SKB_FRAGS) {
+ frag_dma_addr[frag_mapped_count] = buf;
+ frag_dma_len[frag_mapped_count] = skb_frag_size(frag);
+ frag_mapped_count++;
+ }
+
dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
(long long)buf, (long) page_to_pfn(frag->page),
frag->page_offset);
@@ -1166,6 +1188,16 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
netif_start_queue(ndev);
+ return NETDEV_TX_OK;
+dma_map_error:
+ dma_unmap_single(&dev->pci_dev->dev, main_buf, main_len, DMA_TO_DEVICE);
+ for (i = 0; i < frag_mapped_count; i++) {
+ dma_unmap_single(&dev->pci_dev->dev, frag_dma_addr[i], frag_dma_len[i],
+ DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(skb);
+
return NETDEV_TX_OK;
}
--
2.43.0
On 3/27/26 2:16 AM, Wang Jun wrote:
> The ns83820 driver currently ignores the return values of dma_map_single()
> and skb_frag_dma_map() in the transmit path. If DMA mapping fails due to
> IOMMU exhaustion or SWIOTLB pressure, the driver may proceed with invalid
> DMA addresses, potentially causing hardware errors, data corruption, or
> system instability.
>
> Additionally, if mapping fails midway through processing fragmented
> packets,previously mapped DMA resources are not released, leading
> to DMA resource leaks.
>
> Fix this by:
> 1. Checking dma_mapping_error() after each DMA mapping call.
> 2. Implementing an error handling path to unmap successfully
> mapped buffers(both linear and fragments) using
> dma_unmap_single().
> 3. Freeing the skb using dev_kfree_skb_any() to safely handle
> both process and softirq contexts, as hard_start_xmit may be
> called with IRQs enabled.
> 4. Returning NETDEV_TX_OK to drop the packet gracefully and
> prevent TX queue stagnation.
>
> This ensures compliance with the DMA API guidelines and
> improves driver stability under memory pressure.
>
> Fixes: fd9e4d6fec15 ("natsemi: switch from 'pci_' to 'dma_' API")
The Fixes tag looks wrong, as the commit above was just a mechanical
substitution of the APIs used. The issue is pre-existing.
> Cc: stable@vger.kernel.org
> Signed-off-by: Wang Jun <1742789905@qq.com>
> ---
> drivers/net/ethernet/natsemi/ns83820.c | 32 ++++++++++++++++++++++++++
> 1 file changed, 32 insertions(+)
>
> diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
> index cdbf82affa7b..90465e4977c3 100644
> --- a/drivers/net/ethernet/natsemi/ns83820.c
> +++ b/drivers/net/ethernet/natsemi/ns83820.c
> @@ -1051,6 +1051,12 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
> int stopped = 0;
> int do_intr = 0;
> volatile __le32 *first_desc;
> + dma_addr_t frag_dma_addr[MAX_SKB_FRAGS];
> + unsigned int frag_dma_len[MAX_SKB_FRAGS];
> + int frag_mapped_count = 0;
> + dma_addr_t main_buf = 0;
> + unsigned int main_len = 0;
> + int i;
Please respect the reverse christmas tree order above.
> dprintk("ns83820_hard_start_xmit\n");
>
> @@ -1121,6 +1127,13 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
> buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
> DMA_TO_DEVICE);
>
> + if (dma_mapping_error(&dev->pci_dev->dev, buf)) {
> + dev_kfree_skb_any(skb);
> + return NETDEV_TX_OK;
> + }
> + main_buf = buf;
> + main_len = len;
> +
> first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
>
> for (;;) {
> @@ -1144,6 +1157,15 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
>
> buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
> skb_frag_size(frag), DMA_TO_DEVICE);
> + if (dma_mapping_error(&dev->pci_dev->dev, buf))
> + goto dma_map_error;
> +
> + if (frag_mapped_count < MAX_SKB_FRAGS) {
> + frag_dma_addr[frag_mapped_count] = buf;
> + frag_dma_len[frag_mapped_count] = skb_frag_size(frag);
> + frag_mapped_count++;
> + }
> +
> dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
> (long long)buf, (long) page_to_pfn(frag->page),
> frag->page_offset);
> @@ -1166,6 +1188,16 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
> if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
> netif_start_queue(ndev);
>
> + return NETDEV_TX_OK;
> +dma_map_error:
> + dma_unmap_single(&dev->pci_dev->dev, main_buf, main_len, DMA_TO_DEVICE);
> + for (i = 0; i < frag_mapped_count; i++) {
> + dma_unmap_single(&dev->pci_dev->dev, frag_dma_addr[i], frag_dma_len[i],
> + DMA_TO_DEVICE);
> + }
> +
> + dev_kfree_skb_any(skb);
AI review says:
If nr_free < MIN_TX_DESC_FREE earlier in the function, netif_stop_queue
is called and the stopped flag is set to 1. By returning NETDEV_TX_OK
directly from the error path, we skip the race check at the end of the
normal
flow:
if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
netif_start_queue(ndev);
If all pending packets completed concurrently right before the queue was
stopped, could this cause the TX queue to stall indefinitely until the
tx watchdog fires?
Hi Paolo Abeni,
This is v2 of the DMA mapping error handling fix for ns83820. Changes since v1:
- Added queue restart check in error path to avoid potential TX queue stall
(as pointed out by the AI review)
- Adjusted variable declarations to follow reverse christmas tree order
- Switched from dma_unmap_single to dma_unmap_page for fragments
Thanks to reviewers for the feedback.
---
The ns83820 driver currently ignores the return values of dma_map_single()
and skb_frag_dma_map() in the transmit path. If DMA mapping fails due to
IOMMU exhaustion or SWIOTLB pressure, the driver may proceed with invalid
DMA addresses, potentially causing hardware errors, data corruption, or
system instability.
Additionally, if mapping fails midway through processing fragmented
packets, previously mapped DMA resources are not released, leading to
DMA resource leaks.
Fix this by:
1. Checking dma_mapping_error() after each DMA mapping call.
2. Implementing an error handling path to unmap successfully mapped
buffers (both linear and fragments) using dma_unmap_single() /
dma_unmap_page().
3. Freeing the skb using dev_kfree_skb_any() to safely handle both
process and softirq contexts.
4. Returning NETDEV_TX_OK to drop the packet gracefully and prevent
TX queue stagnation.
5. Moving the queue restart check (for race conditions when the queue
was stopped) into a common label, so that the error path also
executes it, avoiding a possible permanent TX queue stall.
This ensures compliance with the DMA API guidelines and improves driver
stability under memory pressure.
Signed-off-by: Wang Jun <1742789905@qq.com>
---
drivers/net/ethernet/natsemi/ns83820.c | 31 +++++++++++++++++++++++++-
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index cdbf82affa7b..f8d037db4ffb 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1051,6 +1051,12 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
int stopped = 0;
int do_intr = 0;
volatile __le32 *first_desc;
+ int i;
+ int frag_mapped_count = 0;
+ unsigned int main_len = 0;
+ unsigned int frag_dma_len[MAX_SKB_FRAGS];
+ dma_addr_t main_buf = 0;
+ dma_addr_t frag_dma_addr[MAX_SKB_FRAGS];
dprintk("ns83820_hard_start_xmit\n");
@@ -1120,6 +1126,12 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
len -= skb->data_len;
buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, buf)) {
+ dev_kfree_skb_any(skb);
+ goto check_queue_and_return;
+ }
+ main_buf = buf;
+ main_len = len;
first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
@@ -1144,6 +1156,15 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, buf))
+ goto dma_map_error;
+
+ if (frag_mapped_count < MAX_SKB_FRAGS) {
+ frag_dma_addr[frag_mapped_count] = buf;
+ frag_dma_len[frag_mapped_count] = skb_frag_size(frag);
+ frag_mapped_count++;
+ }
+
dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n",
(long long)buf, (long) page_to_pfn(frag->page),
frag->page_offset);
@@ -1161,12 +1182,20 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
spin_unlock_irq(&dev->tx_lock);
kick_tx(dev);
-
+check_queue_and_return:
/* Check again: we may have raced with a tx done irq */
if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
netif_start_queue(ndev);
return NETDEV_TX_OK;
+dma_map_error:
+ dma_unmap_single(&dev->pci_dev->dev, main_buf, main_len, DMA_TO_DEVICE);
+ for (i = 0; i < frag_mapped_count; i++) {
+ dma_unmap_page(&dev->pci_dev->dev, frag_dma_addr[i],
+ frag_dma_len[i], DMA_TO_DEVICE);
+ }
+ dev_kfree_skb_any(skb);
+ goto check_queue_and_return;
}
static void ns83820_update_stats(struct ns83820 *dev)
--
2.43.0
On Tue, 31 Mar 2026 20:21:41 +0800 Wang Jun wrote:
> This is v2 of the DMA mapping error handling fix for ns83820. Changes since v1:
>
> - Added queue restart check in error path to avoid potential TX queue stall
>   (as pointed out by the AI review)
> - Adjusted variable declarations to follow reverse christmas tree order
> - Switched from dma_unmap_single to dma_unmap_page for fragments
>
> Thanks to reviewers for the feedback.

Please do not post new patches in reply to old ones.

Please do not post new patches within 24h of previous posting.

And ask yourself if you trying and failing to post patches for this
ancient driver is a good use of maintainer's time.
On Tue, Mar 31, 2026 at 08:21:41PM +0800, Wang Jun wrote:
> Hi Paolo Abeni,
>
> This is v2 of the DMA mapping error handling fix for ns83820. Changes since v1:
>
> - Added queue restart check in error path to avoid potential TX queue stall
>   (as pointed out by the AI review)
> - Adjusted variable declarations to follow reverse christmas tree order
> - Switched from dma_unmap_single to dma_unmap_page for fragments

Usually you'd put the changelog below. Take a look at how this recent
patch does it:

https://lore.kernel.org/netdev/20260331123858.1912449-2-charles.perry@microchip.com/

> diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
> index cdbf82affa7b..f8d037db4ffb 100644
> --- a/drivers/net/ethernet/natsemi/ns83820.c
> +++ b/drivers/net/ethernet/natsemi/ns83820.c
> @@ -1051,6 +1051,12 @@ static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb,
>  	int stopped = 0;
>  	int do_intr = 0;
>  	volatile __le32 *first_desc;
> +	int i;
> +	int frag_mapped_count = 0;
> +	unsigned int main_len = 0;
> +	unsigned int frag_dma_len[MAX_SKB_FRAGS];
> +	dma_addr_t main_buf = 0;
> +	dma_addr_t frag_dma_addr[MAX_SKB_FRAGS];

You mentioned it was fixed to reverse christmas tree in the changelog
above, but doesn't seem like it in the patch?
© 2016 - 2026 Red Hat, Inc.