Use unified Halo context present in CN20K hardware for
octeontx2 netdevs instead of aura and pool contexts.
Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
---
.../ethernet/marvell/octeontx2/nic/cn20k.c | 207 +++++++++---------
.../ethernet/marvell/octeontx2/nic/cn20k.h | 3 +
.../marvell/octeontx2/nic/otx2_common.h | 2 +
.../ethernet/marvell/octeontx2/nic/otx2_pf.c | 6 +
4 files changed, 115 insertions(+), 103 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
index a5a8f4558717..866f48e758a2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -242,15 +242,6 @@ int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
-static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id)
-{
-#ifdef CONFIG_DCB
- return pfvf->queue_to_pfc_map[aura_id];
-#else
- return 0;
-#endif
-}
-
static int cn20k_tc_get_entry_index(struct otx2_flow_config *flow_cfg,
struct otx2_tc_flow *node)
{
@@ -517,84 +508,7 @@ int cn20k_tc_alloc_entry(struct otx2_nic *nic,
return 0;
}
-static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
- int pool_id, int numptrs)
-{
- struct npa_cn20k_aq_enq_req *aq;
- struct otx2_pool *pool;
- u8 bpid_idx;
- int err;
-
- pool = &pfvf->qset.pool[pool_id];
-
- /* Allocate memory for HW to update Aura count.
- * Alloc one cache line, so that it fits all FC_STYPE modes.
- */
- if (!pool->fc_addr) {
- err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
- if (err)
- return err;
- }
-
- /* Initialize this aura's context via AF */
- aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
- if (!aq) {
- /* Shared mbox memory buffer is full, flush it and retry */
- err = otx2_sync_mbox_msg(&pfvf->mbox);
- if (err)
- return err;
- aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
- if (!aq)
- return -ENOMEM;
- }
-
- aq->aura_id = aura_id;
-
- /* Will be filled by AF with correct pool context address */
- aq->aura.pool_addr = pool_id;
- aq->aura.pool_caching = 1;
- aq->aura.shift = ilog2(numptrs) - 8;
- aq->aura.count = numptrs;
- aq->aura.limit = numptrs;
- aq->aura.avg_level = 255;
- aq->aura.ena = 1;
- aq->aura.fc_ena = 1;
- aq->aura.fc_addr = pool->fc_addr->iova;
- aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
-
- /* Enable backpressure for RQ aura */
- if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
- aq->aura.bp_ena = 0;
- /* If NIX1 LF is attached then specify NIX1_RX.
- *
- * Below NPA_AURA_S[BP_ENA] is set according to the
- * NPA_BPINTF_E enumeration given as:
- * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
- * NIX0_RX is 0x0 + 0*0x1 = 0
- * NIX1_RX is 0x0 + 1*0x1 = 1
- * But in HRM it is given that
- * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
- * NIX-RX based on [BP] level. One bit per NIX-RX; index
- * enumerated by NPA_BPINTF_E."
- */
- if (pfvf->nix_blkaddr == BLKADDR_NIX1)
- aq->aura.bp_ena = 1;
-
- bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
- aq->aura.bpid = pfvf->bpid[bpid_idx];
-
- /* Set backpressure level for RQ's Aura */
- aq->aura.bp = RQ_BP_LVL_AURA;
- }
-
- /* Fill AQ info */
- aq->ctype = NPA_AQ_CTYPE_AURA;
- aq->op = NPA_AQ_INSTOP_INIT;
-
- return 0;
-}
-
-static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+static int cn20k_halo_aq_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size,
int type)
{
@@ -610,36 +524,55 @@ static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
if (err)
return err;
+ /* Allocate memory for HW to update Aura count.
+ * Alloc one cache line, so that it fits all FC_STYPE modes.
+ */
+ if (!pool->fc_addr) {
+ err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+ if (err) {
+ qmem_free(pfvf->dev, pool->stack);
+ return err;
+ }
+ }
+
pool->rbsize = buf_size;
- /* Initialize this pool's context via AF */
+ /* Initialize this aura's context via AF */
aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
if (!aq) {
/* Shared mbox memory buffer is full, flush it and retry */
err = otx2_sync_mbox_msg(&pfvf->mbox);
- if (err) {
- qmem_free(pfvf->dev, pool->stack);
- return err;
- }
+ if (err)
+ goto free_mem;
aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
if (!aq) {
- qmem_free(pfvf->dev, pool->stack);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_mem;
}
}
aq->aura_id = pool_id;
- aq->pool.stack_base = pool->stack->iova;
- aq->pool.stack_caching = 1;
- aq->pool.ena = 1;
- aq->pool.buf_size = buf_size / 128;
- aq->pool.stack_max_pages = stack_pages;
- aq->pool.shift = ilog2(numptrs) - 8;
- aq->pool.ptr_start = 0;
- aq->pool.ptr_end = ~0ULL;
+
+ aq->halo.stack_base = pool->stack->iova;
+ aq->halo.stack_caching = 1;
+ aq->halo.ena = 1;
+ aq->halo.buf_size = buf_size / 128;
+ aq->halo.stack_max_pages = stack_pages;
+ aq->halo.shift = ilog2(numptrs) - 8;
+ aq->halo.ptr_start = 0;
+ aq->halo.ptr_end = ~0ULL;
+
+ aq->halo.avg_level = 255;
+ aq->halo.fc_ena = 1;
+ aq->halo.fc_addr = pool->fc_addr->iova;
+ aq->halo.fc_hyst_bits = 0; /* Store count on all updates */
+
+ aq->halo.op_dpc_ena = 1;
+ aq->halo.op_dpc_set = pfvf->npa_dpc;
+ aq->halo.unified_ctx = 1;
/* Fill AQ info */
- aq->ctype = NPA_AQ_CTYPE_POOL;
+ aq->ctype = NPA_AQ_CTYPE_HALO;
aq->op = NPA_AQ_INSTOP_INIT;
if (type != AURA_NIX_RQ) {
@@ -661,6 +594,74 @@ static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
}
return 0;
+
+free_mem:
+ qmem_free(pfvf->dev, pool->stack);
+ qmem_free(pfvf->dev, pool->fc_addr);
+ return err;
+}
+
+static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
+ int pool_id, int numptrs)
+{
+ return 0;
+}
+
+static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
+ int stack_pages, int numptrs, int buf_size,
+ int type)
+{
+ return cn20k_halo_aq_init(pfvf, pool_id, stack_pages,
+ numptrs, buf_size, type);
+}
+
+int cn20k_npa_alloc_dpc(struct otx2_nic *nic)
+{
+ struct npa_cn20k_dpc_alloc_req *req;
+ struct npa_cn20k_dpc_alloc_rsp *rsp;
+ int err;
+
+ req = otx2_mbox_alloc_msg_npa_cn20k_dpc_alloc(&nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Count successful ALLOC requests only */
+ req->dpc_conf = 1ULL << 4;
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+ if (err)
+ return err;
+
+ rsp = (struct npa_cn20k_dpc_alloc_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ nic->npa_dpc = rsp->cntr_id;
+
+ return 0;
+}
+
+int cn20k_npa_free_dpc(struct otx2_nic *nic)
+{
+ struct npa_cn20k_dpc_free_req *req;
+ int err;
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npa_cn20k_dpc_free(&nic->mbox);
+ if (!req) {
+ mutex_unlock(&nic->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->cntr_id = nic->npa_dpc;
+
+ err = otx2_sync_mbox_msg(&nic->mbox);
+
+ mutex_unlock(&nic->mbox.lock);
+
+ return err;
}
static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
index b5e527f6d7eb..16a69d84ea79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
@@ -28,4 +28,7 @@ int cn20k_tc_alloc_entry(struct otx2_nic *nic,
struct otx2_tc_flow *new_node,
struct npc_install_flow_req *dummy);
int cn20k_tc_free_mcam_entry(struct otx2_nic *nic, u16 entry);
+int cn20k_npa_alloc_dpc(struct otx2_nic *nic);
+int cn20k_npa_free_dpc(struct otx2_nic *nic);
+
#endif /* CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index eecee612b7b2..06d96059d026 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -592,6 +592,8 @@ struct otx2_nic {
struct cn10k_ipsec ipsec;
/* af_xdp zero-copy */
unsigned long *af_xdp_zc_qidx;
+
+ u8 npa_dpc; /* NPA DPC counter id */
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index ee623476e5ff..2941549d46c8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1651,6 +1651,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
if (!is_otx2_lbkvf(pf->pdev))
otx2_nix_config_bp(pf, true);
+ if (is_cn20k(pf->pdev))
+ cn20k_npa_alloc_dpc(pf);
+
/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
err = otx2_rq_aura_pool_init(pf);
if (err) {
@@ -1790,6 +1793,9 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_sq_res(pf);
+ if (is_cn20k(pf->pdev))
+ cn20k_npa_free_dpc(pf);
+
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
--
2.48.1
This is an AI-generated review of your patch. The human sending this
email has considered the AI review valid, or at least plausible.
For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
---
octeontx2-pf: cn20k: Use unified Halo context
Use unified Halo context present in CN20K hardware for
octeontx2 netdevs instead of aura and pool contexts.
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
[ ... ]
> +int cn20k_npa_alloc_dpc(struct otx2_nic *nic)
> +{
> + struct npa_cn20k_dpc_alloc_req *req;
> + struct npa_cn20k_dpc_alloc_rsp *rsp;
> + int err;
> +
> + req = otx2_mbox_alloc_msg_npa_cn20k_dpc_alloc(&nic->mbox);
> + if (!req)
> + return -ENOMEM;
> +
> + /* Count successful ALLOC requests only */
> + req->dpc_conf = 1ULL << 4;
> +
> + err = otx2_sync_mbox_msg(&nic->mbox);
> + if (err)
> + return err;
> +
> + rsp = (struct npa_cn20k_dpc_alloc_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
> + 0, &req->hdr);
> + if (IS_ERR(rsp))
> + return PTR_ERR(rsp);
> +
> + nic->npa_dpc = rsp->cntr_id;
> +
> + return 0;
> +}
[ ... ]
> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> @@ -1651,6 +1651,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
> if (!is_otx2_lbkvf(pf->pdev))
> otx2_nix_config_bp(pf, true);
>
> + if (is_cn20k(pf->pdev))
> + cn20k_npa_alloc_dpc(pf);
^^^^^^^^^^^^^^^^^^^^^
Should the return value of cn20k_npa_alloc_dpc() be checked here?
Looking at cn20k_npa_alloc_dpc(), it can return -ENOMEM or errors from
otx2_sync_mbox_msg() or PTR_ERR() from otx2_mbox_get_rsp(). If the DPC
counter allocation fails, pf->npa_dpc remains 0 (from struct
zero-initialization).
Then cn20k_halo_aq_init() configures all NPA auras with
aq->halo.op_dpc_set = pfvf->npa_dpc (= 0), referencing an unallocated
DPC performance counter. On teardown, cn20k_npa_free_dpc() sends a mbox
message to free counter ID 0, which was never allocated.
Every other error-returning mbox operation in this function checks the
return value and jumps to an error label on failure. Should this follow
the same pattern?
> +
> /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
> err = otx2_rq_aura_pool_init(pf);
> if (err) {
[ ... ]
On 2026-03-20 at 22:20:37, Simon Horman (horms@kernel.org) wrote:
> This is an AI-generated review of your patch. The human sending this
> email has considered the AI review valid, or at least plausible.
>
> For local reproduction steps see https://netdev-ai.bots.linux.dev/ai-local.html
> ---
> octeontx2-pf: cn20k: Use unified Halo context
>
> Use unified Halo context present in CN20K hardware for
> octeontx2 netdevs instead of aura and pool contexts.
>
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
>
> [ ... ]
>
> > +int cn20k_npa_alloc_dpc(struct otx2_nic *nic)
> > +{
> > + struct npa_cn20k_dpc_alloc_req *req;
> > + struct npa_cn20k_dpc_alloc_rsp *rsp;
> > + int err;
> > +
> > + req = otx2_mbox_alloc_msg_npa_cn20k_dpc_alloc(&nic->mbox);
> > + if (!req)
> > + return -ENOMEM;
> > +
> > + /* Count successful ALLOC requests only */
> > + req->dpc_conf = 1ULL << 4;
> > +
> > + err = otx2_sync_mbox_msg(&nic->mbox);
> > + if (err)
> > + return err;
> > +
> > + rsp = (struct npa_cn20k_dpc_alloc_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
> > + 0, &req->hdr);
> > + if (IS_ERR(rsp))
> > + return PTR_ERR(rsp);
> > +
> > + nic->npa_dpc = rsp->cntr_id;
> > +
> > + return 0;
> > +}
>
> [ ... ]
>
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> > --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
> > @@ -1651,6 +1651,9 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
> > if (!is_otx2_lbkvf(pf->pdev))
> > otx2_nix_config_bp(pf, true);
> >
> > + if (is_cn20k(pf->pdev))
> > + cn20k_npa_alloc_dpc(pf);
> ^^^^^^^^^^^^^^^^^^^^^
>
> Should the return value of cn20k_npa_alloc_dpc() be checked here?
>
DPC counters are for debugging only; we can proceed even if the counter was not
allocated.
> Looking at cn20k_npa_alloc_dpc(), it can return -ENOMEM or errors from
> otx2_sync_mbox_msg() or PTR_ERR() from otx2_mbox_get_rsp(). If the DPC
> counter allocation fails, pf->npa_dpc remains 0 (from struct
> zero-initialization).
>
> Then cn20k_halo_aq_init() configures all NPA auras with
> aq->halo.op_dpc_set = pfvf->npa_dpc (= 0), referencing an unallocated
> DPC performance counter. On teardown, cn20k_npa_free_dpc() sends a mbox
> message to free counter ID 0, which was never allocated.
Agreed. I will add an npa_dpc_valid flag and use pfvf->npa_dpc only when that
flag is set, to fix this.
Thanks,
Sundeep
>
> Every other error-returning mbox operation in this function checks the
> return value and jumps to an error label on failure. Should this follow
> the same pattern?
>
> > +
> > /* Init Auras and pools used by NIX RQ, for free buffer ptrs */
> > err = otx2_rq_aura_pool_init(pf);
> > if (err) {
>
> [ ... ]
© 2016 - 2026 Red Hat, Inc.