[PATCH net-next 07/11] net: fec: use switch statement to check the type of tx_buf

Posted by Wei Fang 3 weeks, 6 days ago
The tx_buf has three types: FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO and
FEC_TXBUF_T_XDP_TX. Currently, the driver uses 'if...else...' statements
to check the type and perform the corresponding processing, which makes
the code hard to extend: every new type, such as the ones needed for XDP
zero-copy support in the future, would add yet another branch to the
chain. So replace the 'if...else...' statements with switch statements
to prepare for future XDP zero-copy support.
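
As a user-space sketch of the extensibility argument (the enum values
and the TXBUF_T_XSK-style zero-copy type below are hypothetical
stand-ins, not driver code), a new type becomes a single extra case arm:

#include <stdio.h>

/* Stand-ins for the driver's tx_buf types; TXBUF_T_XSK is a made-up
 * future zero-copy type, added only to show the shape of the change.
 */
enum txbuf_type {
	TXBUF_T_SKB,
	TXBUF_T_XDP_NDO,
	TXBUF_T_XDP_TX,
	TXBUF_T_XSK,	/* hypothetical future type */
};

static const char *release_action(enum txbuf_type type)
{
	switch (type) {
	case TXBUF_T_SKB:
		return "free the skb";
	case TXBUF_T_XDP_NDO:
		return "return the xdp frame";
	case TXBUF_T_XDP_TX:
		return "recycle the page pool page";
	case TXBUF_T_XSK:	/* the only arm a new type needs here */
		return "complete the xsk descriptor";
	default:
		return "unknown";
	}
}

int main(void)
{
	puts(release_action(TXBUF_T_XSK));
	return 0;
}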

Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
 drivers/net/ethernet/freescale/fec_main.c | 167 +++++++++++-----------
 1 file changed, 82 insertions(+), 85 deletions(-)

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f3e93598a27c..3bd89d7f105b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1023,33 +1023,33 @@ static void fec_enet_bd_init(struct net_device *dev)
 		txq->bd.cur = bdp;
 
 		for (i = 0; i < txq->bd.ring_size; i++) {
+			dma_addr_t dma = fec32_to_cpu(bdp->cbd_bufaddr);
+			struct page *page;
+
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
-			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
-				if (bdp->cbd_bufaddr &&
-				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-					dma_unmap_single(&fep->pdev->dev,
-							 fec32_to_cpu(bdp->cbd_bufaddr),
-							 fec16_to_cpu(bdp->cbd_datlen),
-							 DMA_TO_DEVICE);
-				if (txq->tx_buf[i].buf_p)
-					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
-			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
-				if (bdp->cbd_bufaddr)
-					dma_unmap_single(&fep->pdev->dev,
-							 fec32_to_cpu(bdp->cbd_bufaddr),
+			switch (txq->tx_buf[i].type) {
+			case FEC_TXBUF_T_SKB:
+				if (dma && !IS_TSO_HEADER(txq, dma))
+					dma_unmap_single(&fep->pdev->dev, dma,
 							 fec16_to_cpu(bdp->cbd_datlen),
 							 DMA_TO_DEVICE);
 
-				if (txq->tx_buf[i].buf_p)
-					xdp_return_frame(txq->tx_buf[i].buf_p);
-			} else {
-				struct page *page = txq->tx_buf[i].buf_p;
-
-				if (page)
-					page_pool_put_page(pp_page_to_nmdesc(page)->pp,
-							   page, 0,
-							   false);
+				dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+				break;
+			case FEC_TXBUF_T_XDP_NDO:
+				dma_unmap_single(&fep->pdev->dev, dma,
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+				xdp_return_frame(txq->tx_buf[i].buf_p);
+				break;
+			case FEC_TXBUF_T_XDP_TX:
+				page = txq->tx_buf[i].buf_p;
+				page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+						   page, 0, false);
+				break;
+			default:
+				break;
 			}
 
 			txq->tx_buf[i].buf_p = NULL;
@@ -1514,45 +1514,66 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			break;
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
+		frame_len = fec16_to_cpu(bdp->cbd_datlen);
 
-		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-			skb = txq->tx_buf[index].buf_p;
+		switch (txq->tx_buf[index].type) {
+		case FEC_TXBUF_T_SKB:
 			if (bdp->cbd_bufaddr &&
 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
 				dma_unmap_single(&fep->pdev->dev,
 						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
-			bdp->cbd_bufaddr = cpu_to_fec32(0);
+						 frame_len, DMA_TO_DEVICE);
+
+			skb = txq->tx_buf[index].buf_p;
 			if (!skb)
 				goto tx_buf_done;
-		} else {
+
+			frame_len = skb->len;
+
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+			 * are to time stamp the packet, so we still need to check time
+			 * stamping enabled flag.
+			 */
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+				     fep->hwts_tx_en) && fep->bufdesc_ex) {
+				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+				struct skb_shared_hwtstamps shhwtstamps;
+
+				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+				skb_tstamp_tx(skb, &shhwtstamps);
+			}
+
+			/* Free the sk buffer associated with this last transmit */
+			napi_consume_skb(skb, budget);
+			break;
+		case FEC_TXBUF_T_XDP_NDO:
 			/* Tx processing cannot call any XDP (or page pool) APIs if
 			 * the "budget" is 0. Because NAPI is called with budget of
 			 * 0 (such as netpoll) indicates we may be in an IRQ context,
 			 * however, we can't use the page pool from IRQ context.
 			 */
 			if (unlikely(!budget))
-				break;
+				goto out;
 
-			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
-				xdpf = txq->tx_buf[index].buf_p;
-				if (bdp->cbd_bufaddr)
-					dma_unmap_single(&fep->pdev->dev,
-							 fec32_to_cpu(bdp->cbd_bufaddr),
-							 fec16_to_cpu(bdp->cbd_datlen),
-							 DMA_TO_DEVICE);
-			} else {
-				page = txq->tx_buf[index].buf_p;
-			}
-
-			bdp->cbd_bufaddr = cpu_to_fec32(0);
-			if (unlikely(!txq->tx_buf[index].buf_p)) {
-				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
-				goto tx_buf_done;
-			}
+			xdpf = txq->tx_buf[index].buf_p;
+			dma_unmap_single(&fep->pdev->dev,
+					 fec32_to_cpu(bdp->cbd_bufaddr),
+					 frame_len,  DMA_TO_DEVICE);
+			xdp_return_frame_rx_napi(xdpf);
+			break;
+		case FEC_TXBUF_T_XDP_TX:
+			if (unlikely(!budget))
+				goto out;
 
-			frame_len = fec16_to_cpu(bdp->cbd_datlen);
+			page = txq->tx_buf[index].buf_p;
+			/* The dma_sync_size = 0 as XDP_TX has already synced
+			 * DMA for_device
+			 */
+			page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+					   0, true);
+			break;
+		default:
+			break;
 		}
 
 		/* Check for errors. */
@@ -1572,11 +1593,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 				ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-
-			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
-				ndev->stats.tx_bytes += skb->len;
-			else
-				ndev->stats.tx_bytes += frame_len;
+			ndev->stats.tx_bytes += frame_len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1585,35 +1602,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 		if (status & BD_ENET_TX_DEF)
 			ndev->stats.collisions++;
 
-		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-			 * are to time stamp the packet, so we still need to check time
-			 * stamping enabled flag.
-			 */
-			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-				     fep->hwts_tx_en) && fep->bufdesc_ex) {
-				struct skb_shared_hwtstamps shhwtstamps;
-				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-				skb_tstamp_tx(skb, &shhwtstamps);
-			}
-
-			/* Free the sk buffer associated with this last transmit */
-			napi_consume_skb(skb, budget);
-		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
-			xdp_return_frame_rx_napi(xdpf);
-		} else { /* recycle pages of XDP_TX frames */
-			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
-			page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
-					   0, true);
-		}
-
 		txq->tx_buf[index].buf_p = NULL;
 		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
 		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
 
 tx_buf_done:
+		bdp->cbd_bufaddr = cpu_to_fec32(0);
 		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
 		 */
@@ -1632,6 +1626,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 		}
 	}
 
+out:
+
 	/* ERR006358: Keep the transmitter going */
 	if (bdp != txq->bd.cur &&
 	    readl(txq->bd.reg_desc_active) == 0)
@@ -3413,6 +3409,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	unsigned int i;
 	struct fec_enet_priv_tx_q *txq;
 	struct fec_enet_priv_rx_q *rxq;
+	struct page *page;
 	unsigned int q;
 
 	for (q = 0; q < fep->num_rx_queues; q++) {
@@ -3436,20 +3433,20 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
 
-			if (!txq->tx_buf[i].buf_p) {
-				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
-				continue;
-			}
-
-			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+			switch (txq->tx_buf[i].type) {
+			case FEC_TXBUF_T_SKB:
 				dev_kfree_skb(txq->tx_buf[i].buf_p);
-			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+				break;
+			case FEC_TXBUF_T_XDP_NDO:
 				xdp_return_frame(txq->tx_buf[i].buf_p);
-			} else {
-				struct page *page = txq->tx_buf[i].buf_p;
-
+				break;
+			case FEC_TXBUF_T_XDP_TX:
+				page = txq->tx_buf[i].buf_p;
 				page_pool_put_page(pp_page_to_nmdesc(page)->pp,
 						   page, 0, false);
+				break;
+			default:
+				break;
 			}
 
 			txq->tx_buf[i].buf_p = NULL;
-- 
2.34.1
Re: [PATCH net-next 07/11] net: fec: use switch statement to check the type of tx_buf
Posted by David Laight 3 weeks, 4 days ago
On Tue, 13 Jan 2026 11:29:35 +0800
Wei Fang <wei.fang@nxp.com> wrote:

> The tx_buf has three types: FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO and
> FEC_TXBUF_T_XDP_TX. Currently, the driver uses 'if...else...' statements
> to check the type and perform the corresponding processing, which makes
> the code hard to extend: every new type, such as the ones needed for XDP
> zero-copy support in the future, would add yet another branch to the
> chain. So replace the 'if...else...' statements with switch statements
> to prepare for future XDP zero-copy support.

The if...else... sequence has the advantage that the common 'cases'
can be put first.
The compiler will use a branch tree for a switch statement (jumps tables
are pretty much not allowed because of speculative execution issues) and
limit the maximum number of branches.
That is likely to be pessimal in many cases - especially if it generates
mispredicted branches for the common cases.

So not clear cut at all.
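
(For illustration only - a self-contained user-space sketch of that
point, keeping the hot type on a predicted fall-through path with
likely() and leaving the switch for the rare types; the enum and the
handlers are stand-ins, not the driver's code:)

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)

enum txbuf_type { TXBUF_T_SKB, TXBUF_T_XDP_NDO, TXBUF_T_XDP_TX };

static void reclaim(enum txbuf_type type)
{
	/* Hot path first: most tx buffers are plain skbs, so test that
	 * before falling back to the switch for the rare XDP types.
	 */
	if (likely(type == TXBUF_T_SKB)) {
		puts("skb path");
		return;
	}

	switch (type) {
	case TXBUF_T_XDP_NDO:
		puts("xdp ndo path");
		break;
	case TXBUF_T_XDP_TX:
		puts("xdp tx path");
		break;
	default:
		break;
	}
}

int main(void)
{
	reclaim(TXBUF_T_SKB);
	reclaim(TXBUF_T_XDP_TX);
	return 0;
}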

	David
Re: [PATCH net-next 07/11] net: fec: use switch statement to check the type of tx_buf
Posted by Frank Li 3 weeks, 5 days ago
On Tue, Jan 13, 2026 at 11:29:35AM +0800, Wei Fang wrote:
> The tx_buf has three types: FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO and
> FEC_TXBUF_T_XDP_TX. Currently, the driver uses 'if...else...' statements
> to check the type and perform the corresponding processing, which makes
> the code hard to extend: every new type, such as the ones needed for XDP
> zero-copy support in the future, would add yet another branch to the
> chain. So replace the 'if...else...' statements with switch statements
> to prepare for future XDP zero-copy support.
>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> ---
>  drivers/net/ethernet/freescale/fec_main.c | 167 +++++++++++-----------
>  1 file changed, 82 insertions(+), 85 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
> index f3e93598a27c..3bd89d7f105b 100644
> --- a/drivers/net/ethernet/freescale/fec_main.c
> +++ b/drivers/net/ethernet/freescale/fec_main.c
> @@ -1023,33 +1023,33 @@ static void fec_enet_bd_init(struct net_device *dev)
>  		txq->bd.cur = bdp;
>
>  		for (i = 0; i < txq->bd.ring_size; i++) {
> +			dma_addr_t dma = fec32_to_cpu(bdp->cbd_bufaddr);
> +			struct page *page;
> +
>  			/* Initialize the BD for every fragment in the page. */
>  			bdp->cbd_sc = cpu_to_fec16(0);
> -			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
> -				if (bdp->cbd_bufaddr &&
> -				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
> -					dma_unmap_single(&fep->pdev->dev,
> -							 fec32_to_cpu(bdp->cbd_bufaddr),
> -							 fec16_to_cpu(bdp->cbd_datlen),
> -							 DMA_TO_DEVICE);
> -				if (txq->tx_buf[i].buf_p)
> -					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
> -			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
> -				if (bdp->cbd_bufaddr)
> -					dma_unmap_single(&fep->pdev->dev,
> -							 fec32_to_cpu(bdp->cbd_bufaddr),
> +			switch (txq->tx_buf[i].type) {
> +			case FEC_TXBUF_T_SKB:
> +				if (dma && !IS_TSO_HEADER(txq, dma))
> +					dma_unmap_single(&fep->pdev->dev, dma,
>  							 fec16_to_cpu(bdp->cbd_datlen),
>  							 DMA_TO_DEVICE);
>
> -				if (txq->tx_buf[i].buf_p)
> -					xdp_return_frame(txq->tx_buf[i].buf_p);
> -			} else {
> -				struct page *page = txq->tx_buf[i].buf_p;
> -
> -				if (page)
> -					page_pool_put_page(pp_page_to_nmdesc(page)->pp,
> -							   page, 0,
> -							   false);
> +				dev_kfree_skb_any(txq->tx_buf[i].buf_p);
> +				break;
> +			case FEC_TXBUF_T_XDP_NDO:
> +				dma_unmap_single(&fep->pdev->dev, dma,
> +						 fec16_to_cpu(bdp->cbd_datlen),
> +						 DMA_TO_DEVICE);
> +				xdp_return_frame(txq->tx_buf[i].buf_p);

Looks like the logic is not exactly the same as the original's:

if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
	if (bdp->cbd_bufaddr)
		...
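
Keeping that check inside the new case arm would preserve the original
behaviour, e.g. something like this (untested sketch, reusing the 'dma'
local the patch already introduces):

			case FEC_TXBUF_T_XDP_NDO:
				if (dma)
					dma_unmap_single(&fep->pdev->dev, dma,
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				xdp_return_frame(txq->tx_buf[i].buf_p);
				break;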

Frank
