[PATCH net-next 06/11] net: fec: transmit XDP frames in bulk

Wei Fang posted 11 patches 3 weeks, 6 days ago
There is a newer version of this series
[PATCH net-next 06/11] net: fec: transmit XDP frames in bulk
Posted by Wei Fang 3 weeks, 6 days ago
Currently, the driver writes the ENET_TDAR register for every XDP frame
to trigger transmit start. Frequent MMIO writes consume more CPU cycles
and may reduce XDP TX performance, so transmit XDP frames in bulk.

Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
 drivers/net/ethernet/freescale/fec_main.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 0b114a68cd8e..f3e93598a27c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1994,6 +1994,8 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
 				rxq->stats[RX_XDP_TX_ERRORS]++;
 				fec_xdp_drop(rxq, &xdp, sync);
 				trace_xdp_exception(ndev, prog, XDP_TX);
+			} else {
+				xdp_res |= FEC_ENET_XDP_TX;
 			}
 			break;
 		default:
@@ -2043,6 +2045,10 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
 	if (xdp_res & FEC_ENET_XDP_REDIR)
 		xdp_do_flush();
 
+	if (xdp_res & FEC_ENET_XDP_TX)
+		/* Trigger transmission start */
+		fec_txq_trigger_xmit(fep, fep->tx_queue[queue]);
+
 	return pkt_received;
 }
 
@@ -4033,9 +4039,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
 	txq->bd.cur = bdp;
 
-	/* Trigger transmission start */
-	fec_txq_trigger_xmit(fep, txq);
-
 	return 0;
 }
 
@@ -4087,6 +4090,9 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 		sent_frames++;
 	}
 
+	if (sent_frames)
+		fec_txq_trigger_xmit(fep, txq);
+
 	__netif_tx_unlock(nq);
 
 	return sent_frames;
-- 
2.34.1
Re: [PATCH net-next 06/11] net: fec: transmit XDP frames in bulk
Posted by Frank Li 3 weeks, 5 days ago
On Tue, Jan 13, 2026 at 11:29:34AM +0800, Wei Fang wrote:
> Currently, the driver writes the ENET_TDAR register for every XDP frame
> to trigger transmit start. Frequent MMIO writes consume more CPU cycles
> and may reduce XDP TX performance, so transmit XDP frames in bulk.
>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> ---

Did you test the light-load case? Does any unexpected latency occur? A missing
trigger is sometimes hard to find under heavy load.

Frank

>  drivers/net/ethernet/freescale/fec_main.c | 12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index 0b114a68cd8e..f3e93598a27c 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -1994,6 +1994,8 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
>  				rxq->stats[RX_XDP_TX_ERRORS]++;
>  				fec_xdp_drop(rxq, &xdp, sync);
>  				trace_xdp_exception(ndev, prog, XDP_TX);
> +			} else {
> +				xdp_res |= FEC_ENET_XDP_TX;
>  			}
>  			break;
>  		default:
> @@ -2043,6 +2045,10 @@ static int fec_enet_rx_queue_xdp(struct fec_enet_private *fep, int queue,
>  	if (xdp_res & FEC_ENET_XDP_REDIR)
>  		xdp_do_flush();
>
> +	if (xdp_res & FEC_ENET_XDP_TX)
> +		/* Trigger transmission start */
> +		fec_txq_trigger_xmit(fep, fep->tx_queue[queue]);
> +
>  	return pkt_received;
>  }
>
> @@ -4033,9 +4039,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
>
>  	txq->bd.cur = bdp;
>
> -	/* Trigger transmission start */
> -	fec_txq_trigger_xmit(fep, txq);
> -
>  	return 0;
>  }
>
> @@ -4087,6 +4090,9 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
>  		sent_frames++;
>  	}
>
> +	if (sent_frames)
> +		fec_txq_trigger_xmit(fep, txq);
> +
>  	__netif_tx_unlock(nq);
>
>  	return sent_frames;
> --
> 2.34.1
>