[PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers

Tariq Toukan posted 11 patches 6 months, 3 weeks ago
There is a newer version of this series
[PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Tariq Toukan 6 months, 3 weeks ago
From: Saeed Mahameed <saeedm@nvidia.com>

Allocate a separate page pool for headers when SHAMPO is enabled.
This will be useful for adding support for a zc page pool, which has to be
different from the headers page pool.

Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Cosmin Ratiu <cratiu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h  |  4 ++
 .../net/ethernet/mellanox/mlx5/core/en_main.c | 37 ++++++++++++++---
 .../net/ethernet/mellanox/mlx5/core/en_rx.c   | 41 +++++++++++--------
 3 files changed, 59 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 581eef34f512..c329de1d4f0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -716,7 +716,11 @@ struct mlx5e_rq {
 	struct bpf_prog __rcu *xdp_prog;
 	struct mlx5e_xdpsq    *xdpsq;
 	DECLARE_BITMAP(flags, 8);
+
+	/* page pools */
 	struct page_pool      *page_pool;
+	struct page_pool      *hd_page_pool;
+
 	struct mlx5e_xdp_buff mxbuf;
 
 	/* AF_XDP zero-copy */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a81d354af7c8..9e2975782a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -750,12 +750,10 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
 				struct mlx5e_params *params,
 				struct mlx5e_rq_param *rqp,
 				struct mlx5e_rq *rq,
-				u32 *pool_size,
 				int node)
 {
 	void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
 	u16 hd_per_wq;
-	int wq_size;
 	int err;
 
 	if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
@@ -780,9 +778,33 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
 	rq->mpwqe.shampo->key = cpu_to_be32(rq->mpwqe.shampo->mkey);
 	rq->mpwqe.shampo->hd_per_wqe =
 		mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
-	wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
-	*pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
-		     MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+	/* separate page pool for shampo headers */
+	{
+		int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+		struct page_pool_params pp_params = { };
+		u32 pool_size;
+
+		pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+				MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+		pp_params.order     = 0;
+		pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+		pp_params.pool_size = pool_size;
+		pp_params.nid       = node;
+		pp_params.dev       = rq->pdev;
+		pp_params.napi      = rq->cq.napi;
+		pp_params.netdev    = rq->netdev;
+		pp_params.dma_dir   = rq->buff.map_dir;
+		pp_params.max_len   = PAGE_SIZE;
+
+		rq->hd_page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(rq->hd_page_pool)) {
+			err = PTR_ERR(rq->hd_page_pool);
+			rq->hd_page_pool = NULL;
+			goto err_hds_page_pool;
+		}
+	}
 
 	/* gro only data structures */
 	rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
@@ -794,6 +816,8 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
 	return 0;
 
 err_hw_gro_data:
+	page_pool_destroy(rq->hd_page_pool);
+err_hds_page_pool:
 	mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
 err_umr_mkey:
 	mlx5e_rq_shampo_hd_info_free(rq);
@@ -808,6 +832,7 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
 		return;
 
 	kvfree(rq->hw_gro_data);
+	page_pool_destroy(rq->hd_page_pool);
 	mlx5e_rq_shampo_hd_info_free(rq);
 	mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
 	kvfree(rq->mpwqe.shampo);
@@ -887,7 +912,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		if (err)
 			goto err_rq_mkey;
 
-		err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
+		err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
 		if (err)
 			goto err_free_mpwqe_info;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 84b1ab8233b8..e34ef53ebd0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -273,12 +273,12 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 
 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
 
-static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
+static int mlx5e_page_alloc_fragmented(struct page_pool *pool,
 				       struct mlx5e_frag_page *frag_page)
 {
 	struct page *page;
 
-	page = page_pool_dev_alloc_pages(rq->page_pool);
+	page = page_pool_dev_alloc_pages(pool);
 	if (unlikely(!page))
 		return -ENOMEM;
 
@@ -292,14 +292,14 @@ static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
 	return 0;
 }
 
-static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
+static void mlx5e_page_release_fragmented(struct page_pool *pool,
 					  struct mlx5e_frag_page *frag_page)
 {
 	u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
 	struct page *page = frag_page->page;
 
 	if (page_pool_unref_page(page, drain_count) == 0)
-		page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
+		page_pool_put_unrefed_page(pool, page, -1, true);
 }
 
 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -313,7 +313,8 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 		 * offset) should just use the new one without replenishing again
 		 * by themselves.
 		 */
-		err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
+		err = mlx5e_page_alloc_fragmented(rq->page_pool,
+						  frag->frag_page);
 
 	return err;
 }
@@ -332,7 +333,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 				     struct mlx5e_wqe_frag_info *frag)
 {
 	if (mlx5e_frag_can_release(frag))
-		mlx5e_page_release_fragmented(rq, frag->frag_page);
+		mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
 }
 
 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -584,7 +585,8 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
 				struct mlx5e_frag_page *frag_page;
 
 				frag_page = &wi->alloc_units.frag_pages[i];
-				mlx5e_page_release_fragmented(rq, frag_page);
+				mlx5e_page_release_fragmented(rq->page_pool,
+							      frag_page);
 			}
 		}
 	}
@@ -679,11 +681,10 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
 		struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
 		u64 addr;
 
-		err = mlx5e_page_alloc_fragmented(rq, frag_page);
+		err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
 		if (unlikely(err))
 			goto err_unmap;
 
-
 		addr = page_pool_get_dma_addr(frag_page->page);
 
 		for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
@@ -715,7 +716,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
 		if (!header_offset) {
 			struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
 
-			mlx5e_page_release_fragmented(rq, frag_page);
+			mlx5e_page_release_fragmented(rq->hd_page_pool,
+						      frag_page);
 		}
 	}
 
@@ -791,7 +793,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
 		dma_addr_t addr;
 
-		err = mlx5e_page_alloc_fragmented(rq, frag_page);
+		err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
 		if (unlikely(err))
 			goto err_unmap;
 		addr = page_pool_get_dma_addr(frag_page->page);
@@ -836,7 +838,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 err_unmap:
 	while (--i >= 0) {
 		frag_page--;
-		mlx5e_page_release_fragmented(rq, frag_page);
+		mlx5e_page_release_fragmented(rq->page_pool, frag_page);
 	}
 
 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
@@ -855,7 +857,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
 	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
 		struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
 
-		mlx5e_page_release_fragmented(rq, frag_page);
+		mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
 	}
 	clear_bit(header_index, shampo->bitmap);
 }
@@ -1100,6 +1102,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 
 	if (rq->page_pool)
 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
+	if (rq->hd_page_pool)
+		page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
 
 	head = rq->mpwqe.actual_wq_head;
 	i = missing;
@@ -2004,7 +2008,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	if (prog) {
 		/* area for bpf_xdp_[store|load]_bytes */
 		net_prefetchw(page_address(frag_page->page) + frag_offset);
-		if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+		if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
+							 &wi->linear_page))) {
 			rq->stats->buff_alloc_err++;
 			return NULL;
 		}
@@ -2068,7 +2073,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 
 				wi->linear_page.frags++;
 			}
-			mlx5e_page_release_fragmented(rq, &wi->linear_page);
+			mlx5e_page_release_fragmented(rq->page_pool,
+						      &wi->linear_page);
 			return NULL; /* page/packet was consumed by XDP */
 		}
 
@@ -2077,13 +2083,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
 		if (unlikely(!skb)) {
-			mlx5e_page_release_fragmented(rq, &wi->linear_page);
+			mlx5e_page_release_fragmented(rq->page_pool,
+						      &wi->linear_page);
 			return NULL;
 		}
 
 		skb_mark_for_recycle(skb);
 		wi->linear_page.frags++;
-		mlx5e_page_release_fragmented(rq, &wi->linear_page);
+		mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
 
 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
 			struct mlx5e_frag_page *pagep;
-- 
2.31.1
Re: [PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Jakub Kicinski 6 months, 3 weeks ago
On Fri, 23 May 2025 00:41:21 +0300 Tariq Toukan wrote:
> Allocate a separate page pool for headers when SHAMPO is enabled.
> This will be useful for adding support to zc page pool, which has to be
> different from the headers page pool.

Could you explain why always allocate a separate pool? 
For bnxt we do it only if ZC is enabled (or system pages are large),
see bnxt_separate_head_pool() and page_pool_is_unreadable().

Not sure if page_pool_is_unreadable() existed when this code
was written.
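
For reference, a minimal sketch of that conditional approach in mlx5 naming
(assuming page_pool_is_unreadable() takes the payload pool; the helper name
and the page-size threshold below are illustrative, not the actual bnxt code):

	/* Sketch only: headers need their own pool when the payload pool
	 * memory is unreadable by the CPU (zero-copy), or when system
	 * pages are large (e.g. 64K-page systems).
	 */
	static bool mlx5e_rq_need_hd_pool(struct mlx5e_rq *rq)
	{
		return page_pool_is_unreadable(rq->page_pool) ||
		       PAGE_SIZE > SZ_32K;
	}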

> -	wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
> -	*pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
> -		     MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
> +
> +	/* separate page pool for shampo headers */
> +	{
> +		int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
> +		struct page_pool_params pp_params = { };
> +		u32 pool_size;

A free-standing code block? I think it's universally understood
to be very poor coding style..
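
One way to keep the variable scoping without a bare block would be to move
the setup into its own helper, e.g. a sketch assembled from the hunk above:

	static int mlx5e_create_shampo_hd_page_pool(struct mlx5e_rq *rq,
						    u32 pool_size, int node)
	{
		struct page_pool_params pp_params = {
			.order		= 0,
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.pool_size	= pool_size,
			.nid		= node,
			.dev		= rq->pdev,
			.napi		= rq->cq.napi,
			.netdev		= rq->netdev,
			.dma_dir	= rq->buff.map_dir,
			.max_len	= PAGE_SIZE,
		};

		/* Dedicated pool for SHAMPO header buffers. */
		rq->hd_page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->hd_page_pool)) {
			int err = PTR_ERR(rq->hd_page_pool);

			rq->hd_page_pool = NULL;
			return err;
		}
		return 0;
	}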
Re: [PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Saeed Mahameed 6 months, 3 weeks ago
On 22 May 15:30, Jakub Kicinski wrote:
>On Fri, 23 May 2025 00:41:21 +0300 Tariq Toukan wrote:
>> Allocate a separate page pool for headers when SHAMPO is enabled.
>> This will be useful for adding support to zc page pool, which has to be
>> different from the headers page pool.
>
>Could you explain why always allocate a separate pool?

Better flow management, 0 conditional code on data path to alloc/return
header buffers, since in mlx5 we already have separate paths to handle
headers, we don't have/need bnxt_separate_head_pool() and
rxr->need_head_pool spread across the code..
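
That split is what the data path in this patch relies on: each call site
simply passes the pool it owns, no branch needed, e.g.:

	/* payload pages */
	err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);

	/* SHAMPO header pages */
	err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);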

Since we alloc and return pages in bulk, it makes more sense to manage
headers and data in separate pools if we are going to do it anyway for
"unreadable pools", and when there's no performance impact.

>For bnxt we do it only if ZC is enabled (or system pages are large),
>see bnxt_separate_head_pool() and page_pool_is_unreadable().
>
>Not sure if page_pool_is_unreadable() existed when this code
>was written.
>
>> -	wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
>> -	*pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
>> -		     MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
>> +
>> +	/* separate page pool for shampo headers */
>> +	{
>> +		int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
>> +		struct page_pool_params pp_params = { };
>> +		u32 pool_size;
>
>A free standing code block? I this it's universally understood
>to be very poor coding style..
>

Sure, if used excessively and incorrectly. Here it's only used for temporary
variable scoping. I don't think there is anything wrong with free-standing
blocks when used in the proper situations.
Re: [PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Jakub Kicinski 6 months, 2 weeks ago
On Thu, 22 May 2025 16:08:48 -0700 Saeed Mahameed wrote:
> On 22 May 15:30, Jakub Kicinski wrote:
> >On Fri, 23 May 2025 00:41:21 +0300 Tariq Toukan wrote:  
> >> Allocate a separate page pool for headers when SHAMPO is enabled.
> >> This will be useful for adding support to zc page pool, which has to be
> >> different from the headers page pool.  
> >
> >Could you explain why always allocate a separate pool?  
> 
> Better flow management, 0 conditional code on data path to alloc/return
> header buffers, since in mlx5 we already have separate paths to handle
> header, we don't have/need bnxt_separate_head_pool() and
> rxr->need_head_pool spread across the code.. 
> 
> Since we alloc and return pages in bulks, it makes more sense to manage
> headers and data in separate pools if we are going to do it anyway for 
> "undreadable_pools", and when there's no performance impact.

I think you need to look closer at the bnxt implementation.
There is no conditional on the buffer alloc path. If the head and
payload pools are identical we simply assign the same pointer to 
(using mlx5 naming) page_pool and hd_page_pool.

Your arguments are not very convincing, TBH.
The memory sitting in the recycling rings is very much not free.
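
In mlx5 terms that would be roughly (sketch only; mlx5e_rq_need_hd_pool() is
a hypothetical predicate along the lines of bnxt_separate_head_pool()):

	/* Setup: alias the header pool unless a separate one is needed. */
	if (mlx5e_rq_need_hd_pool(rq)) {
		rq->hd_page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->hd_page_pool))
			return PTR_ERR(rq->hd_page_pool);
	} else {
		rq->hd_page_pool = rq->page_pool;
	}

	/* Teardown: only destroy the header pool if it is actually separate. */
	if (rq->hd_page_pool != rq->page_pool)
		page_pool_destroy(rq->hd_page_pool);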
Re: [PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Dragos Tatulea 6 months, 2 weeks ago
On Tue, May 27, 2025 at 08:29:56AM -0700, Jakub Kicinski wrote:
> On Thu, 22 May 2025 16:08:48 -0700 Saeed Mahameed wrote:
> > On 22 May 15:30, Jakub Kicinski wrote:
> > >On Fri, 23 May 2025 00:41:21 +0300 Tariq Toukan wrote:  
> > >> Allocate a separate page pool for headers when SHAMPO is enabled.
> > >> This will be useful for adding support to zc page pool, which has to be
> > >> different from the headers page pool.  
> > >
> > >Could you explain why always allocate a separate pool?  
> > 
> > Better flow management, 0 conditional code on data path to alloc/return
> > header buffers, since in mlx5 we already have separate paths to handle
> > header, we don't have/need bnxt_separate_head_pool() and
> > rxr->need_head_pool spread across the code.. 
> > 
> > Since we alloc and return pages in bulks, it makes more sense to manage
> > headers and data in separate pools if we are going to do it anyway for 
> > "undreadable_pools", and when there's no performance impact.
> 
> I think you need to look closer at the bnxt implementation.
> There is no conditional on the buffer alloc path. If the head and
> payload pools are identical we simply assign the same pointer to 
> (using mlx5 naming) page_pool and hd_page_pool.
> 
> Your arguments are not very convincing, TBH.
> The memory sitting in the recycling rings is very much not free.

I can add 2 more small arguments for always using 2 page pools:

- For large ring sizes + high MTU, the page_pool size will go above the
  internal limit of the page_pool in HW GRO mode.

- Debuggability (already mentioned by Saeed in the counters patch): if
  something goes wrong (page leaks, for example) we can easily pinpoint
  where the issue is.

Thanks,
Dragos
Re: [PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Mina Almasry 6 months, 3 weeks ago
On Thu, May 22, 2025 at 4:09 PM Saeed Mahameed <saeed@kernel.org> wrote:
>
> On 22 May 15:30, Jakub Kicinski wrote:
> >On Fri, 23 May 2025 00:41:21 +0300 Tariq Toukan wrote:
> >> Allocate a separate page pool for headers when SHAMPO is enabled.
> >> This will be useful for adding support to zc page pool, which has to be
> >> different from the headers page pool.
> >
> >Could you explain why always allocate a separate pool?
>
> Better flow management, 0 conditional code on data path to alloc/return
> header buffers, since in mlx5 we already have separate paths to handle
> header, we don't have/need bnxt_separate_head_pool() and
> rxr->need_head_pool spread across the code..
>
> Since we alloc and return pages in bulks, it makes more sense to manage
> headers and data in separate pools if we are going to do it anyway for
> "undreadable_pools", and when there's no performance impact.
>

Are you allocating full pages for each incoming header (which is much
smaller than a page)? Or are you reusing the same PAGE_SIZE from the
page_pool to store multiple headers?

-- 
Thanks,
Mina
Re: [PATCH net-next V2 06/11] net/mlx5e: SHAMPO: Separate pool for headers
Posted by Saeed Mahameed 6 months, 3 weeks ago
On 22 May 16:24, Mina Almasry wrote:
>On Thu, May 22, 2025 at 4:09 PM Saeed Mahameed <saeed@kernel.org> wrote:
>>
>> On 22 May 15:30, Jakub Kicinski wrote:
>> >On Fri, 23 May 2025 00:41:21 +0300 Tariq Toukan wrote:
>> >> Allocate a separate page pool for headers when SHAMPO is enabled.
>> >> This will be useful for adding support to zc page pool, which has to be
>> >> different from the headers page pool.
>> >
>> >Could you explain why always allocate a separate pool?
>>
>> Better flow management, 0 conditional code on data path to alloc/return
>> header buffers, since in mlx5 we already have separate paths to handle
>> header, we don't have/need bnxt_separate_head_pool() and
>> rxr->need_head_pool spread across the code..
>>
>> Since we alloc and return pages in bulks, it makes more sense to manage
>> headers and data in separate pools if we are going to do it anyway for
>> "undreadable_pools", and when there's no performance impact.
>>
>
>Are you allocating full pages for each incoming header (which is much
>smaller than a page)? Or are you reusing the same PAGE_SIZE from the
>page_pool to store multiple headers?
>

Multiple headers per page: MLX5E_SHAMPO_WQ_HEADER_PER_PAGE of them share a
single page from the header pool.
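
Roughly (a sketch; the slot-size arithmetic is illustrative, the real code
uses the driver's header-offset helpers):

	/* One pool page backs MLX5E_SHAMPO_WQ_HEADER_PER_PAGE header slots. */
	addr = page_pool_get_dma_addr(frag_page->page);
	for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
		/* DMA address of the j-th header slot within the page */
		dma_addr_t hd_addr =
			addr + j * (PAGE_SIZE / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);

		/* ... program hd_addr into the j-th KSM/UMR entry ... */
	}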

>-- 
>Thanks,
>Mina