[PATCH net-next 4/8] net: macb: drop handling of recycled buffers in gem_rx_refill()

Théo Lebrun posted 8 patches 1 month ago
[PATCH net-next 4/8] net: macb: drop handling of recycled buffers in gem_rx_refill()
Posted by Théo Lebrun 1 month ago
The refill operation supports detecting if a buffer is present in a
slot; if it is, then it updates its DMA descriptor reusing the same
buffer.

This behavior can be dropped; all codepaths of gem_rx() letting a buffer
lie around to be reused by refill have disappeared. Said another way:
every time queue->rx_tail is incremented, queue->rx_buff[entry] is set
to NULL.

On the same occasion, move the `gfp_alloc` assignment out of the loop and
into the variable declarations, as its value is constant across the
function's lifetime. Also fix a tiny alignment issue in the while
statement's continuation line.

Signed-off-by: Théo Lebrun <theo.lebrun@bootlin.com>
---
 drivers/net/ethernet/cadence/macb_main.c | 64 ++++++++++++++------------------
 1 file changed, 28 insertions(+), 36 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c1677f1d8f23..ed94f9f0894b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1351,18 +1351,18 @@ static unsigned int gem_total_rx_buffer_size(struct macb *bp)
 
 static int gem_rx_refill(struct macb_queue *queue, bool napi)
 {
+	gfp_t gfp_alloc = napi ? GFP_ATOMIC : GFP_KERNEL;
 	struct macb *bp = queue->bp;
 	struct macb_dma_desc *desc;
 	unsigned int entry;
 	struct page *page;
 	dma_addr_t paddr;
-	gfp_t gfp_alloc;
 	int err = 0;
 	void *data;
 	int offset;
 
 	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
-			bp->rx_ring_size) > 0) {
+			  bp->rx_ring_size) > 0) {
 		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
@@ -1370,41 +1370,33 @@ static int gem_rx_refill(struct macb_queue *queue, bool napi)
 
 		desc = macb_rx_desc(queue, entry);
 
-		if (!queue->rx_buff[entry]) {
-			gfp_alloc = napi ? GFP_ATOMIC : GFP_KERNEL;
-			page = page_pool_alloc_frag(queue->page_pool, &offset,
-						    gem_total_rx_buffer_size(bp),
-						    gfp_alloc | __GFP_NOWARN);
-			if (!page) {
-				dev_err_ratelimited(&bp->pdev->dev,
-						    "Unable to allocate rx buffer\n");
-				err = -ENOMEM;
-				break;
-			}
-
-			paddr = page_pool_get_dma_addr(page) +
-				gem_rx_pad(bp) + offset;
-
-			dma_sync_single_for_device(&bp->pdev->dev,
-						   paddr, bp->rx_buffer_size,
-						   page_pool_get_dma_dir(queue->page_pool));
-
-			data = page_address(page) + offset;
-			queue->rx_buff[entry] = data;
-
-			if (entry == bp->rx_ring_size - 1)
-				paddr |= MACB_BIT(RX_WRAP);
-			desc->ctrl = 0;
-			/* Setting addr clears RX_USED and allows reception,
-			 * make sure ctrl is cleared first to avoid a race.
-			 */
-			dma_wmb();
-			macb_set_addr(bp, desc, paddr);
-		} else {
-			desc->ctrl = 0;
-			dma_wmb();
-			desc->addr &= ~MACB_BIT(RX_USED);
+		page = page_pool_alloc_frag(queue->page_pool, &offset,
+					    gem_total_rx_buffer_size(bp),
+					    gfp_alloc | __GFP_NOWARN);
+		if (!page) {
+			dev_err_ratelimited(&bp->pdev->dev,
+					    "Unable to allocate rx buffer\n");
+			err = -ENOMEM;
+			break;
 		}
+
+		paddr = page_pool_get_dma_addr(page) + gem_rx_pad(bp) + offset;
+
+		dma_sync_single_for_device(&bp->pdev->dev,
+					   paddr, bp->rx_buffer_size,
+					   page_pool_get_dma_dir(queue->page_pool));
+
+		data = page_address(page) + offset;
+		queue->rx_buff[entry] = data;
+
+		if (entry == bp->rx_ring_size - 1)
+			paddr |= MACB_BIT(RX_WRAP);
+		desc->ctrl = 0;
+		/* Setting addr clears RX_USED and allows reception,
+		 * make sure ctrl is cleared first to avoid a race.
+		 */
+		dma_wmb();
+		macb_set_addr(bp, desc, paddr);
 		queue->rx_prepared_head++;
 	}
 

-- 
2.53.0