[net-next PATCH v1 10/15] octeontx2-pf: ipsec: Setup NIX HW resources for inbound flows

Posted by Tanmay Jagdale 9 months, 1 week ago
An incoming encrypted IPsec packet in the RVU NIX hardware needs
to be classified for inline fastpath processing and then assinged
an RQ and Aura pool before being sent to CPT for decryption.

Create a dedicated RQ, Aura and Pool with the following setup
specifically for IPsec flows:
 - Set ipsech_ena, ipsecd_drop_ena in RQ context to enable hardware
   fastpath processing for IPsec flows.
 - Configure the dedicated Aura to raise an interrupt when
   its buffer count drops below a threshold value so that the
   buffers can be replenished from the CPU.

The RQ, Aura and Pool contexts are initialized only when esp-hw-offload
feature is enabled via ethtool.

Also, move some of the RQ context macro definitions to otx2_common.h
so that they can be used in the IPsec driver as well.
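
The feature itself is toggled through ethtool's feature interface; as
a usage sketch (interface name arbitrary):

	ethtool -K eth0 esp-hw-offload on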

Signed-off-by: Tanmay Jagdale <tanmay@marvell.com>
---
 .../marvell/octeontx2/nic/cn10k_ipsec.c       | 201 +++++++++++++++++-
 .../marvell/octeontx2/nic/cn10k_ipsec.h       |   2 +
 .../marvell/octeontx2/nic/otx2_common.c       |  23 +-
 .../marvell/octeontx2/nic/otx2_common.h       |  16 ++
 .../ethernet/marvell/octeontx2/nic/otx2_pf.c  |   4 +
 5 files changed, 227 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index c435dcae4929..b88c1b4c5839 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -346,6 +346,193 @@ static int cn10k_outb_cpt_init(struct net_device *netdev)
 	return ret;
 }
 
+static int cn10k_ipsec_ingress_aura_init(struct otx2_nic *pfvf, int aura_id,
+					 int pool_id, int numptrs)
+{
+	struct npa_aq_enq_req *aq;
+	struct otx2_pool *pool;
+	int err;
+
+	pool = &pfvf->qset.pool[pool_id];
+
+	/* Allocate memory for HW to update Aura count.
+	 * Alloc one cache line, so that it fits all FC_STYPE modes.
+	 */
+	if (!pool->fc_addr) {
+		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
+		if (err)
+			return err;
+	}
+
+	/* Initialize this aura's context via AF */
+	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+	if (!aq)
+		return -ENOMEM;
+
+	aq->aura_id = aura_id;
+	/* Will be filled by AF with correct pool context address */
+	aq->aura.pool_addr = pool_id;
+	aq->aura.pool_caching = 1;
+	aq->aura.shift = ilog2(numptrs) - 8;
+	aq->aura.count = numptrs;
+	aq->aura.limit = numptrs;
+	aq->aura.avg_level = 255;
+	aq->aura.ena = 1;
+	aq->aura.fc_ena = 1;
+	aq->aura.fc_addr = pool->fc_addr->iova;
+	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */
+	aq->aura.thresh_up = 1;
+	aq->aura.thresh = aq->aura.count / 4;
+	aq->aura.thresh_qint_idx = 0;
+
+	/* Enable backpressure for RQ aura */
+	if (!is_otx2_lbkvf(pfvf->pdev)) {
+		aq->aura.bp_ena = 0;
+		/* If NIX1 LF is attached then specify NIX1_RX.
+		 *
+		 * Below NPA_AURA_S[BP_ENA] is set according to the
+		 * NPA_BPINTF_E enumeration given as:
+		 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
+		 * NIX0_RX is 0x0 + 0*0x1 = 0
+		 * NIX1_RX is 0x0 + 1*0x1 = 1
+		 * But in HRM it is given that
+		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
+		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
+		 * enumerated by NPA_BPINTF_E."
+		 */
+		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
+			aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+		aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
+		aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
+
+		/* Set backpressure level for RQ's Aura */
+		aq->aura.bp = RQ_BP_LVL_AURA;
+	}
+
+	/* Fill AQ info */
+	aq->ctype = NPA_AQ_CTYPE_AURA;
+	aq->op = NPA_AQ_INSTOP_INIT;
+
+	return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int cn10k_ipsec_ingress_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+{
+	struct otx2_qset *qset = &pfvf->qset;
+	struct nix_cn10k_aq_enq_req *aq;
+
+	/* Get memory to put this msg */
+	aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+	if (!aq)
+		return -ENOMEM;
+
+	aq->rq.cq = qidx;
+	aq->rq.ena = 1;
+	aq->rq.pb_caching = 1;
+	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
+	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
+	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
+	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
+	aq->rq.qint_idx = 0;
+	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
+	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
+	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
+	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
+	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
+	aq->rq.ipsech_ena = 1;		/* IPsec HW fast path enable */
+	aq->rq.ipsecd_drop_ena = 1;	/* IPsec dynamic drop enable */
+	aq->rq.xqe_drop_ena = 0;
+	aq->rq.ena_wqwd = 1;		/* Store NIX headers in packet buffer */
+	aq->rq.first_skip = 16;		/* Store packet after skipping 16*8 bytes
+					 * to accommodate NIX headers.
+					 */
+
+	/* Fill AQ info */
+	aq->qidx = qidx;
+	aq->ctype = NIX_AQ_CTYPE_RQ;
+	aq->op = NIX_AQ_INSTOP_INIT;
+
+	return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
+{
+	struct otx2_hw *hw = &pfvf->hw;
+	int stack_pages, pool_id;
+	struct otx2_pool *pool;
+	int err, ptr, num_ptrs;
+	dma_addr_t bufptr;
+
+	num_ptrs = 256;
+	pool_id = pfvf->ipsec.inb_ipsec_pool;
+	stack_pages = (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
+
+	mutex_lock(&pfvf->mbox.lock);
+
+	/* Initialize aura context */
+	err = cn10k_ipsec_ingress_aura_init(pfvf, pool_id, pool_id, num_ptrs);
+	if (err)
+		goto fail;
+
+	/* Initialize pool */
+	err = otx2_pool_init(pfvf, pool_id, stack_pages, num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
+	if (err)
+		goto fail;
+
+	/* Flush accumulated messages */
+	err = otx2_sync_mbox_msg(&pfvf->mbox);
+	if (err)
+		goto pool_fail;
+
+	/* Allocate pointers and free them to aura/pool */
+	pool = &pfvf->qset.pool[pool_id];
+	for (ptr = 0; ptr < num_ptrs; ptr++) {
+		err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+		if (err) {
+			err = -ENOMEM;
+			goto pool_fail;
+		}
+		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM);
+	}
+
+	/* Initialize RQ and map buffers from pool_id */
+	err = cn10k_ipsec_ingress_rq_init(pfvf, pfvf->ipsec.inb_ipsec_rq, pool_id);
+	if (err)
+		goto pool_fail;
+
+	mutex_unlock(&pfvf->mbox.lock);
+	return 0;
+
+pool_fail:
+	mutex_unlock(&pfvf->mbox.lock);
+	qmem_free(pfvf->dev, pool->stack);
+	qmem_free(pfvf->dev, pool->fc_addr);
+	page_pool_destroy(pool->page_pool);
+	devm_kfree(pfvf->dev, pool->xdp);
+	pool->xsk_pool = NULL;
+fail:
+	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+	return err;
+}
+
+static int cn10k_inb_cpt_init(struct net_device *netdev)
+{
+	struct otx2_nic *pfvf = netdev_priv(netdev);
+	int ret = 0;
+
+	ret = cn10k_ipsec_setup_nix_rx_hw_resources(pfvf);
+	if (ret) {
+		netdev_err(netdev, "Failed to setup NIX HW resources for IPsec\n");
+		return ret;
+	}
+
+	return ret;
+}
+
 static int cn10k_outb_cpt_clean(struct otx2_nic *pf)
 {
 	int ret;
@@ -765,14 +952,22 @@ static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
 int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
 {
 	struct otx2_nic *pf = netdev_priv(netdev);
+	int ret = 0;
 
 	/* IPsec offload supported on cn10k */
 	if (!is_dev_support_ipsec_offload(pf->pdev))
 		return -EOPNOTSUPP;
 
-	/* Initialize CPT for outbound ipsec offload */
-	if (enable)
-		return cn10k_outb_cpt_init(netdev);
+	/* Initialize CPT for outbound and inbound IPsec offload */
+	if (enable) {
+		ret = cn10k_outb_cpt_init(netdev);
+		if (ret)
+			return ret;
+
+		ret = cn10k_inb_cpt_init(netdev);
+		if (ret)
+			return ret;
+	}
 
 	/* Don't do CPT cleanup if SA installed */
 	if (pf->ipsec.outb_sa_count) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index 6dd6ead0b28b..5b7b8f3db913 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -111,6 +111,8 @@ struct cn10k_ipsec {
 	struct workqueue_struct *sa_workq;
 
 	/* For Inbound Inline IPSec flows */
+	u16 inb_ipsec_rq;
+	u16 inb_ipsec_pool;
 	u32 sa_tbl_entry_sz;
 	struct qmem *inb_sa;
 	struct list_head inb_sw_ctx_list;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 84cd029a85aa..c077e5ae346f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -877,22 +877,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
 	}
 }
 
-/* RED and drop levels of CQ on packet reception.
- * For CQ level is measure of emptiness ( 0x0 = full, 255 = empty).
- */
-#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
-#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))
-
-/* RED and drop levels of AURA for packet reception.
- * For AURA level is measure of fullness (0x0 = empty, 255 = full).
- * Eg: For RQ length 1K, for pass/drop level 204/230.
- * RED accepts pkts if free pointers > 102 & <= 205.
- * Drops pkts if free pointers < 102.
- */
-#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
-#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
-#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-
 int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
 {
 	struct otx2_qset *qset = &pfvf->qset;
@@ -1242,6 +1226,13 @@ int otx2_config_nix(struct otx2_nic *pfvf)
 	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
 	nixlf->rss_grps = MAX_RSS_GROUPS;
 	nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
+	/* Add an additional RQ for inline inbound IPsec flows
+	 * and store the RQ index for setting it up later when
+	 * IPsec offload is enabled via ethtool.
+	 */
+	nixlf->rq_cnt++;
+	pfvf->ipsec.inb_ipsec_rq = pfvf->hw.rx_queues;
+
 	/* We don't know absolute NPA LF idx attached.
 	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with
 	 * NPA LF attached to this RVU PF/VF.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 7e3ddb0bee12..b5b87b5553ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -76,6 +76,22 @@ enum arua_mapped_qtypes {
 /* Send skid of 2000 packets required for CQ size of 4K CQEs. */
 #define SEND_CQ_SKID	2000
 
+/* RED and drop levels of CQ on packet reception.
+ * For CQ level is measure of emptiness ( 0x0 = full, 255 = empty).
+ */
+#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
+#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))
+
+/* RED and drop levels of AURA for packet reception.
+ * For AURA level is measure of fullness (0x0 = empty, 255 = full).
+ * Eg: For RQ length 1K, for pass/drop level 204/230.
+ * RED accepts pkts if free pointers > 102 & <= 205.
+ * Drops pkts if free pointers < 102.
+ */
+#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
+#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
+#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
+
 #define OTX2_GET_RX_STATS(reg) \
 	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
 #define OTX2_GET_TX_STATS(reg) \
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 0aee8e3861f3..8f1c17fa5a0b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1538,6 +1538,10 @@ int otx2_init_hw_resources(struct otx2_nic *pf)
 	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
 
+	/* Increase pool count by 1 for ingress inline IPsec */
+	pf->ipsec.inb_ipsec_pool = hw->pool_cnt;
+	hw->pool_cnt++;
+
 	if (!otx2_rep_dev(pf->pdev)) {
 		/* Maximum hardware supported transmit length */
 		pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
-- 
2.43.0
Re: [net-next PATCH v1 10/15] octeontx2-pf: ipsec: Setup NIX HW resources for inbound flows
Posted by Simon Horman 9 months, 1 week ago
On Fri, May 02, 2025 at 06:49:51PM +0530, Tanmay Jagdale wrote:
> An incoming encrypted IPsec packet in the RVU NIX hardware needs
> to be classified for inline fastpath processing and then assinged

nit: assigned

     checkpatch.pl --codespell is your friend

> an RQ and Aura pool before being sent to CPT for decryption.
> 
> Create a dedicated RQ, Aura and Pool with the following setup
> specifically for IPsec flows:
>  - Set ipsech_ena, ipsecd_drop_ena in RQ context to enable hardware
>    fastpath processing for IPsec flows.
>  - Configure the dedicated Aura to raise an interrupt when
>    its buffer count drops below a threshold value so that the
>    buffers can be replenished from the CPU.
> 
> The RQ, Aura and Pool contexts are initialized only when esp-hw-offload
> feature is enabled via ethtool.
> 
> Also, move some of the RQ context macro definitions to otx2_common.h
> so that they can be used in the IPsec driver as well.
> 
> Signed-off-by: Tanmay Jagdale <tanmay@marvell.com>

...

> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c

...

> +static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
> +{
> +	struct otx2_hw *hw = &pfvf->hw;
> +	int stack_pages, pool_id;
> +	struct otx2_pool *pool;
> +	int err, ptr, num_ptrs;
> +	dma_addr_t bufptr;
> +
> +	num_ptrs = 256;
> +	pool_id = pfvf->ipsec.inb_ipsec_pool;
> +	stack_pages = (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
> +
> +	mutex_lock(&pfvf->mbox.lock);
> +
> +	/* Initialize aura context */
> +	err = cn10k_ipsec_ingress_aura_init(pfvf, pool_id, pool_id, num_ptrs);
> +	if (err)
> +		goto fail;
> +
> +	/* Initialize pool */
> +	err = otx2_pool_init(pfvf, pool_id, stack_pages, num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
> +	if (err)

This appears to leak pool->fc_addr.

> +		goto fail;
> +
> +	/* Flush accumulated messages */
> +	err = otx2_sync_mbox_msg(&pfvf->mbox);
> +	if (err)
> +		goto pool_fail;
> +
> +	/* Allocate pointers and free them to aura/pool */
> +	pool = &pfvf->qset.pool[pool_id];
> +	for (ptr = 0; ptr < num_ptrs; ptr++) {
> +		err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
> +		if (err) {
> +			err = -ENOMEM;
> +			goto pool_fail;
> +		}
> +		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM);
> +	}
> +
> +	/* Initialize RQ and map buffers from pool_id */
> +	err = cn10k_ipsec_ingress_rq_init(pfvf, pfvf->ipsec.inb_ipsec_rq, pool_id);
> +	if (err)
> +		goto pool_fail;
> +
> +	mutex_unlock(&pfvf->mbox.lock);
> +	return 0;
> +
> +pool_fail:
> +	mutex_unlock(&pfvf->mbox.lock);
> +	qmem_free(pfvf->dev, pool->stack);
> +	qmem_free(pfvf->dev, pool->fc_addr);
> +	page_pool_destroy(pool->page_pool);
> +	devm_kfree(pfvf->dev, pool->xdp);

It is not clear to me why devm_kfree() is being called here.
I didn't look deeply, but I think it is likely that either
pool->xdp should be freed when the device is released, or
pool->xdp should not be allocated (and freed) using devm functions.

> +	pool->xsk_pool = NULL;

The clean-up of pool->stack, pool->page_pool, pool->xdp, and
pool->xsk_pool all seems to unwind initialisation performed by
otx2_pool_init(), and appears to be duplicated elsewhere.
I would suggest adding a helper for that.
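
Something along these lines, perhaps (sketch only; the helper name is
hypothetical, and whether pool->xdp belongs here depends on the devm
question above):

	/* Sketch: unwind the pool-side setup done by otx2_pool_init() */
	static void otx2_pool_uninit(struct otx2_nic *pfvf, struct otx2_pool *pool)
	{
		page_pool_destroy(pool->page_pool);
		pool->page_pool = NULL;
		qmem_free(pfvf->dev, pool->stack);
		pool->stack = NULL;
		pool->xsk_pool = NULL;
	}

Callers would then pair this with whatever is caller-specific, such as
the aura's fc_addr qmem in this function.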

> +fail:
> +	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
> +	return err;
> +}

...
Re: [net-next PATCH v1 10/15] octeontx2-pf: ipsec: Setup NIX HW resources for inbound flows
Posted by Tanmay Jagdale 8 months, 3 weeks ago
Hi Simon,

On 2025-05-07 at 19:16:20, Simon Horman (horms@kernel.org) wrote:
> On Fri, May 02, 2025 at 06:49:51PM +0530, Tanmay Jagdale wrote:
> > An incoming encrypted IPsec packet in the RVU NIX hardware needs
> > to be classified for inline fastpath processing and then assinged
> 
> nit: assigned
> 
>      checkpatch.pl --codespell is your friend
> 
ACK.

> > an RQ and Aura pool before being sent to CPT for decryption.
> > 
> > Create a dedicated RQ, Aura and Pool with the following setup
> > specifically for IPsec flows:
> >  - Set ipsech_ena, ipsecd_drop_ena in RQ context to enable hardware
> >    fastpath processing for IPsec flows.
> >  - Configure the dedicated Aura to raise an interrupt when
> >    its buffer count drops below a threshold value so that the
> >    buffers can be replenished from the CPU.
> > 
> > The RQ, Aura and Pool contexts are initialized only when esp-hw-offload
> > feature is enabled via ethtool.
> > 
> > Also, move some of the RQ context macro definitions to otx2_common.h
> > so that they can be used in the IPsec driver as well.
> > 
> > Signed-off-by: Tanmay Jagdale <tanmay@marvell.com>
> 
> ...
> 
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
> 
> ...
> 
> > +static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
> > +{
> > +	struct otx2_hw *hw = &pfvf->hw;
> > +	int stack_pages, pool_id;
> > +	struct otx2_pool *pool;
> > +	int err, ptr, num_ptrs;
> > +	dma_addr_t bufptr;
> > +
> > +	num_ptrs = 256;
> > +	pool_id = pfvf->ipsec.inb_ipsec_pool;
> > +	stack_pages = (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
> > +
> > +	mutex_lock(&pfvf->mbox.lock);
> > +
> > +	/* Initialize aura context */
> > +	err = cn10k_ipsec_ingress_aura_init(pfvf, pool_id, pool_id, num_ptrs);
> > +	if (err)
> > +		goto fail;
> > +
> > +	/* Initialize pool */
> > +	err = otx2_pool_init(pfvf, pool_id, stack_pages, num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
> > +	if (err)
> 
> This appears to leak pool->fc_addr.
Okay, let me look into this.
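
One option might be a dedicated error label so the aura's fc_addr qmem
is also released when otx2_pool_init() fails. A minimal sketch (label
name hypothetical; note the pool pointer has to be looked up before
the first goto):

	pool = &pfvf->qset.pool[pool_id];
	...
	/* Initialize pool */
	err = otx2_pool_init(pfvf, pool_id, stack_pages, num_ptrs,
			     pfvf->rbsize, AURA_NIX_RQ);
	if (err)
		goto aura_fail;
	...
aura_fail:
	/* Release the flow-control area allocated for the aura */
	qmem_free(pfvf->dev, pool->fc_addr);
	pool->fc_addr = NULL;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	return err;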

> 
> > +		goto fail;
> > +
> > +	/* Flush accumulated messages */
> > +	err = otx2_sync_mbox_msg(&pfvf->mbox);
> > +	if (err)
> > +		goto pool_fail;
> > +
> > +	/* Allocate pointers and free them to aura/pool */
> > +	pool = &pfvf->qset.pool[pool_id];
> > +	for (ptr = 0; ptr < num_ptrs; ptr++) {
> > +		err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
> > +		if (err) {
> > +			err = -ENOMEM;
> > +			goto pool_fail;
> > +		}
> > +		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM);
> > +	}
> > +
> > +	/* Initialize RQ and map buffers from pool_id */
> > +	err = cn10k_ipsec_ingress_rq_init(pfvf, pfvf->ipsec.inb_ipsec_rq, pool_id);
> > +	if (err)
> > +		goto pool_fail;
> > +
> > +	mutex_unlock(&pfvf->mbox.lock);
> > +	return 0;
> > +
> > +pool_fail:
> > +	mutex_unlock(&pfvf->mbox.lock);
> > +	qmem_free(pfvf->dev, pool->stack);
> > +	qmem_free(pfvf->dev, pool->fc_addr);
> > +	page_pool_destroy(pool->page_pool);
> > +	devm_kfree(pfvf->dev, pool->xdp);
> 
> It is not clear to me why devm_kfree() is being called here.
> I didn't look deeply, but I think it is likely that either
> pool->xdp should be freed when the device is released, or
> pool->xdp should not be allocated (and freed) using devm functions.
Good catch. We aren't using pool->xdp for inbound IPsec yet, so I'll
drop this.

> 
> > +	pool->xsk_pool = NULL;
> 
> The clean-up of pool->stack, pool->page_pool, pool->xdp, and
> pool->xsk_pool all seems to unwind initialisation performed by
> otx2_pool_init(), and appears to be duplicated elsewhere.
> I would suggest adding a helper for that.
Okay I'll look into reusing common code.

> 
> > +fail:
> > +	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
> > +	return err;
> > +}
> 
> ...
Re: [net-next PATCH v1 10/15] octeontx2-pf: ipsec: Setup NIX HW resources for inbound flows
Posted by kernel test robot 9 months, 1 week ago
Hi Tanmay,

kernel test robot noticed the following build warnings:

[auto build test WARNING on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Tanmay-Jagdale/crypto-octeontx2-Share-engine-group-info-with-AF-driver/20250502-213203
base:   net-next/main
patch link:    https://lore.kernel.org/r/20250502132005.611698-11-tanmay%40marvell.com
patch subject: [net-next PATCH v1 10/15] octeontx2-pf: ipsec: Setup NIX HW resources for inbound flows
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20250507/202505071739.xTGCCtUx-lkp@intel.com/config)
compiler: clang version 20.1.2 (https://github.com/llvm/llvm-project 58df0ef89dd64126512e4ee27b4ac3fd8ddf6247)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250507/202505071739.xTGCCtUx-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202505071739.xTGCCtUx-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c:488:6: warning: variable 'pool' is used uninitialized whenever 'if' condition is true [-Wsometimes-uninitialized]
     488 |         if (err)
         |             ^~~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c:512:23: note: uninitialized use occurs here
     512 |         qmem_free(pfvf->dev, pool->stack);
         |                              ^~~~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c:488:2: note: remove the 'if' if its condition is always false
     488 |         if (err)
         |         ^~~~~~~~
     489 |                 goto pool_fail;
         |                 ~~~~~~~~~~~~~~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c:466:24: note: initialize the variable 'pool' to silence this warning
     466 |         struct otx2_pool *pool;
         |                               ^
         |                                = NULL
   1 warning generated.
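
A minimal way to address the warning (sketch only) is to hoist the
pool lookup above the first branch that can reach pool_fail, rather
than NULL-initialising the pointer:

	pool = &pfvf->qset.pool[pool_id];	/* valid before any goto pool_fail */

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto pool_fail;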


vim +488 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c

   461	
   462	static int cn10k_ipsec_setup_nix_rx_hw_resources(struct otx2_nic *pfvf)
   463	{
   464		struct otx2_hw *hw = &pfvf->hw;
   465		int stack_pages, pool_id;
   466		struct otx2_pool *pool;
   467		int err, ptr, num_ptrs;
   468		dma_addr_t bufptr;
   469	
   470		num_ptrs = 256;
   471		pool_id = pfvf->ipsec.inb_ipsec_pool;
   472		stack_pages = (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
   473	
   474		mutex_lock(&pfvf->mbox.lock);
   475	
   476		/* Initialize aura context */
   477		err = cn10k_ipsec_ingress_aura_init(pfvf, pool_id, pool_id, num_ptrs);
   478		if (err)
   479			goto fail;
   480	
   481		/* Initialize pool */
   482		err = otx2_pool_init(pfvf, pool_id, stack_pages, num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
   483		if (err)
   484			goto fail;
   485	
   486		/* Flush accumulated messages */
   487		err = otx2_sync_mbox_msg(&pfvf->mbox);
 > 488		if (err)
   489			goto pool_fail;
   490	
   491		/* Allocate pointers and free them to aura/pool */
   492		pool = &pfvf->qset.pool[pool_id];
   493		for (ptr = 0; ptr < num_ptrs; ptr++) {
   494			err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
   495			if (err) {
   496				err = -ENOMEM;
   497				goto pool_fail;
   498			}
   499			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr + OTX2_HEAD_ROOM);
   500		}
   501	
   502		/* Initialize RQ and map buffers from pool_id */
   503		err = cn10k_ipsec_ingress_rq_init(pfvf, pfvf->ipsec.inb_ipsec_rq, pool_id);
   504		if (err)
   505			goto pool_fail;
   506	
   507		mutex_unlock(&pfvf->mbox.lock);
   508		return 0;
   509	
   510	pool_fail:
   511		mutex_unlock(&pfvf->mbox.lock);
   512		qmem_free(pfvf->dev, pool->stack);
   513		qmem_free(pfvf->dev, pool->fc_addr);
   514		page_pool_destroy(pool->page_pool);
   515		devm_kfree(pfvf->dev, pool->xdp);
   516		pool->xsk_pool = NULL;
   517	fail:
   518		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
   519		return err;
   520	}
   521	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki