From: Jakub Kicinski <kuba@kernel.org>
In normal operation only a subset of queues is configured for
zero-copy. Since zero-copy is the main use case for larger buffer
sizes, we need to configure the sizes per queue.
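The per-ring field is seeded from the global bp->rx_page_size when the
ring structures are initialized, so behaviour is unchanged until a
queue opts into a different size. A minimal sketch of the resulting
access pattern (bnxt_rx_pagesz() is a hypothetical helper shown only
for illustration, not part of this patch):

	/* Sketch: every RX path now reads the buffer size cached on its
	 * own ring instead of the device-wide bp->rx_page_size.
	 */
	static inline u16 bnxt_rx_pagesz(const struct bnxt_rx_ring_info *rxr)
	{
		return rxr->rx_page_size;
	}

This keeps hot-path lookups on data the ring already owns and lets a
future zero-copy configuration raise the size for individual queues.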
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 46 ++++++++++---------
drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 +
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 6 +--
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2 +-
4 files changed, 30 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 467e8a0745e1..50f663777843 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -900,7 +900,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr)
{
- return rxr->need_head_pool || PAGE_SIZE > rxr->bnapi->bp->rx_page_size;
+ return rxr->need_head_pool || PAGE_SIZE > rxr->rx_page_size;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -910,9 +910,9 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
{
struct page *page;
- if (PAGE_SIZE > bp->rx_page_size) {
+ if (PAGE_SIZE > rxr->rx_page_size) {
page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
- bp->rx_page_size);
+ rxr->rx_page_size);
} else {
page = page_pool_dev_alloc_pages(rxr->page_pool);
*offset = 0;
@@ -1150,9 +1150,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_page_size,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
bp->rx_dir);
- skb = napi_build_skb(data_ptr - bp->rx_offset, bp->rx_page_size);
+ skb = napi_build_skb(data_ptr - bp->rx_offset, rxr->rx_page_size);
if (!skb) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
@@ -1184,7 +1184,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_page_size,
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, rxr->rx_page_size,
bp->rx_dir);
if (unlikely(!payload))
@@ -1198,7 +1198,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
- skb_add_rx_frag(skb, 0, page, off, len, bp->rx_page_size);
+ skb_add_rx_frag(skb, 0, page, off, len, rxr->rx_page_size);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
payload + NET_IP_ALIGN);
@@ -1283,7 +1283,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
cons_rx_buf->offset,
- frag_len, bp->rx_page_size);
+ frag_len, rxr->rx_page_size);
} else {
skb_frag_t *frag = &shinfo->frags[i];
@@ -1308,7 +1308,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
if (skb) {
skb->len -= frag_len;
skb->data_len -= frag_len;
- skb->truesize -= bp->rx_page_size;
+ skb->truesize -= rxr->rx_page_size;
}
--shinfo->nr_frags;
@@ -1323,7 +1323,7 @@ static u32 __bnxt_rx_agg_netmems(struct bnxt *bp,
}
page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
- bp->rx_page_size);
+ rxr->rx_page_size);
total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
@@ -2276,8 +2276,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
- rxr->page_pool, &xdp);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3825,7 +3824,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size / rx_size_fac;
- pp.order = get_order(bp->rx_page_size);
+ pp.order = get_order(rxr->rx_page_size);
pp.nid = numa_node;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
@@ -4318,6 +4317,8 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
if (!rxr)
goto skip_rx;
+ rxr->rx_page_size = bp->rx_page_size;
+
ring = &rxr->rx_ring_struct;
rmem = &ring->ring_mem;
rmem->nr_pages = bp->rx_nr_pages;
@@ -4477,7 +4478,7 @@ static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
- type = ((u32)bp->rx_page_size << RX_BD_LEN_SHIFT) |
+ type = ((u32)rxr->rx_page_size << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
@@ -7042,6 +7043,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
struct hwrm_ring_alloc_input *req,
+ struct bnxt_rx_ring_info *rxr,
struct bnxt_ring_struct *ring)
{
struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
@@ -7051,7 +7053,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
if (ring_type == HWRM_RING_ALLOC_AGG) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(bp->rx_page_size);
+ req->rx_buf_size = cpu_to_le16(rxr->rx_page_size);
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
} else {
req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
@@ -7065,6 +7067,7 @@ static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
{
@@ -7121,7 +7124,8 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
cpu_to_le32(bp->rx_ring_mask + 1) :
cpu_to_le32(bp->rx_agg_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req,
+ rxr, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7269,7 +7273,7 @@ static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
u32 map_idx = bnapi->index;
int rc;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
if (rc)
return rc;
@@ -7289,7 +7293,7 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
int rc;
map_idx = grp_idx + bp->rx_nr_rings;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, rxr, ring, type, map_idx);
if (rc)
return rc;
@@ -7313,7 +7317,7 @@ static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
ring = &cpr->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
if (rc)
return rc;
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
@@ -7328,7 +7332,7 @@ static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
const u32 type = HWRM_RING_ALLOC_TX;
int rc;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, tx_idx);
if (rc)
return rc;
bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
@@ -7354,7 +7358,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
vector = bp->irq_tbl[map_idx].vector;
disable_irq_nosync(vector);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = hwrm_ring_alloc_send_msg(bp, NULL, ring, type, map_idx);
if (rc) {
enable_irq(vector);
goto err_out;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 56aafae568f8..4f9d4c71c0e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1107,6 +1107,7 @@ struct bnxt_rx_ring_info {
unsigned long *rx_agg_bmap;
u16 rx_agg_bmap_size;
+ u16 rx_page_size;
bool need_head_pool;
dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 41d3ba56ba41..19dda0201c69 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -183,7 +183,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
- u32 buflen = bp->rx_page_size;
+ u32 buflen = rxr->rx_page_size;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
dma_addr_t mapping;
@@ -461,7 +461,7 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp)
+ struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -470,7 +470,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- bp->rx_page_size * num_frags,
+ rxr->rx_page_size * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 220285e190fc..8933a0dec09a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -32,6 +32,6 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
- u8 num_frags, struct page_pool *pool,
+ u8 num_frags, struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
#endif
--
2.49.0
On Mon, Aug 18, 2025 at 7:45 PM Pavel Begunkov <asml.silence@gmail.com> wrote:
[...]

Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>