From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
Implement a mailbox message to allocate an rq_mask and apply it to a
nixlf, so that RQ context fields can be toggled for CPT second pass
packets.
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
Signed-off-by: Tanmay Jagdale <tanmay@marvell.com>
---
.../net/ethernet/marvell/octeontx2/af/mbox.h | 23 ++++
.../net/ethernet/marvell/octeontx2/af/rvu.h | 7 +
.../ethernet/marvell/octeontx2/af/rvu_cn10k.c | 11 ++
.../ethernet/marvell/octeontx2/af/rvu_nix.c | 120 ++++++++++++++++++
.../ethernet/marvell/octeontx2/af/rvu_reg.h | 6 +
.../marvell/octeontx2/af/rvu_struct.h | 4 +-
6 files changed, 170 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index f9321084abb6..715efcc04c9e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -323,6 +323,9 @@ M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
+M(NIX_LF_INLINE_RQ_CFG, 0x8024, nix_lf_inline_rq_cfg, \
+ nix_rq_cpt_field_mask_cfg_req, \
+ msg_rsp) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
nix_mcast_grp_create_rsp) \
M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
@@ -857,6 +860,7 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_RQ_CPT_MASK = -432,
NIX_AF_ERR_INVALID_BPID = -434,
NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
@@ -1178,6 +1182,25 @@ struct nix_mark_format_cfg_rsp {
u8 mark_format_idx;
};
+struct nix_rq_cpt_field_mask_cfg_req {
+ struct mbox_msghdr hdr;
+#define RQ_CTX_MASK_MAX 6
+ union {
+ u64 rq_ctx_word_set[RQ_CTX_MASK_MAX];
+ struct nix_cn10k_rq_ctx_s rq_set;
+ };
+ union {
+ u64 rq_ctx_word_mask[RQ_CTX_MASK_MAX];
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ };
+ struct nix_lf_rx_ipec_cfg1_req {
+ u32 spb_cpt_aura;
+ u8 rq_mask_enable;
+ u8 spb_cpt_sizem1;
+ u8 spb_cpt_enable;
+ } ipsec_cfg1;
+};
+
struct nix_rx_mode {
struct mbox_msghdr hdr;
#define NIX_RX_MODE_UCAST BIT(0)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 6551fdb612dc..71407f6318ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -350,6 +350,11 @@ struct nix_lso {
u8 in_use;
};
+struct nix_rq_cpt_mask {
+ u8 total;
+ u8 in_use;
+};
+
struct nix_txvlan {
#define NIX_TX_VTAG_DEF_MAX 0x400
struct rsrc_bmap rsrc;
@@ -373,6 +378,7 @@ struct nix_hw {
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
+ struct nix_rq_cpt_mask rq_msk;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
struct nix_bp bp;
@@ -398,6 +404,7 @@ struct hw_cap {
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
+ bool second_cpt_pass;
bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */
bool npc_hash_extract; /* Hash extract enabled ? */
bool npc_exact_match_enabled; /* Exact match supported ? */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..18e2a48e2de1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -544,6 +544,7 @@ void rvu_program_channels(struct rvu *rvu)
void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
+ struct rvu_hwinfo *hw = rvu->hw;
int blkaddr = nix_hw->blkaddr;
u64 cfg;
@@ -558,6 +559,16 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
cfg |= BIT_ULL(1) | BIT_ULL(2);
rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+
+ if (!(cfg & BIT_ULL(62))) {
+ hw->cap.second_cpt_pass = false;
+ return;
+ }
+
+ hw->cap.second_cpt_pass = true;
+ nix_hw->rq_msk.total = NIX_RQ_MSK_PROFILES;
}
void rvu_apr_block_cn10k_init(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 6bd995c45dad..b15fd331facf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -6612,3 +6612,123 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
return ret;
}
+
+static inline void
+configure_rq_mask(struct rvu *rvu, int blkaddr, int nixlf,
+ u8 rq_mask, bool enable)
+{
+ u64 cfg, reg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
+ reg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf));
+ if (enable) {
+ cfg |= BIT_ULL(43);
+ reg = (reg & ~GENMASK_ULL(36, 35)) | ((u64)rq_mask << 35);
+ } else {
+ cfg &= ~BIT_ULL(43);
+ reg = (reg & ~GENMASK_ULL(36, 35));
+ }
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), reg);
+}
+
+static inline void
+configure_spb_cpt(struct rvu *rvu, int blkaddr, int nixlf,
+ struct nix_rq_cpt_field_mask_cfg_req *req, bool enable)
+{
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
+ if (enable) {
+ cfg |= BIT_ULL(37);
+ cfg &= ~GENMASK_ULL(42, 38);
+ cfg |= ((u64)req->ipsec_cfg1.spb_cpt_sizem1 << 38);
+ cfg &= ~GENMASK_ULL(63, 44);
+ cfg |= ((u64)req->ipsec_cfg1.spb_cpt_aura << 44);
+ } else {
+ cfg &= ~BIT_ULL(37);
+ cfg &= ~GENMASK_ULL(42, 38);
+ cfg &= ~GENMASK_ULL(63, 44);
+ }
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
+}
+
+static
+int nix_inline_rq_mask_alloc(struct rvu *rvu,
+ struct nix_rq_cpt_field_mask_cfg_req *req,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ u8 rq_cpt_mask_select;
+ int idx, rq_idx;
+ u64 reg_mask;
+ u64 reg_set;
+
+ for (idx = 0; idx < nix_hw->rq_msk.in_use; idx++) {
+ for (rq_idx = 0; rq_idx < RQ_CTX_MASK_MAX; rq_idx++) {
+ reg_mask = rvu_read64(rvu, blkaddr,
+ NIX_AF_RX_RQX_MASKX(idx, rq_idx));
+ reg_set = rvu_read64(rvu, blkaddr,
+ NIX_AF_RX_RQX_SETX(idx, rq_idx));
+ if (reg_mask != req->rq_ctx_word_mask[rq_idx] &&
+ reg_set != req->rq_ctx_word_set[rq_idx])
+ break;
+ }
+ if (rq_idx == RQ_CTX_MASK_MAX)
+ break;
+ }
+
+ if (idx < nix_hw->rq_msk.in_use) {
+ /* Match found */
+ rq_cpt_mask_select = idx;
+ return idx;
+ }
+
+ if (nix_hw->rq_msk.in_use == nix_hw->rq_msk.total)
+ return NIX_AF_ERR_RQ_CPT_MASK;
+
+ rq_cpt_mask_select = nix_hw->rq_msk.in_use++;
+
+ for (rq_idx = 0; rq_idx < RQ_CTX_MASK_MAX; rq_idx++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_RQX_MASKX(rq_cpt_mask_select, rq_idx),
+ req->rq_ctx_word_mask[rq_idx]);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_RQX_SETX(rq_cpt_mask_select, rq_idx),
+ req->rq_ctx_word_set[rq_idx]);
+ }
+
+ return rq_cpt_mask_select;
+}
+
+int rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
+ struct nix_rq_cpt_field_mask_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_hw *nix_hw;
+ int blkaddr, nixlf;
+ int rq_mask, err;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
+ if (err)
+ return err;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (!hw->cap.second_cpt_pass)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+
+ if (req->ipsec_cfg1.rq_mask_enable) {
+ rq_mask = nix_inline_rq_mask_alloc(rvu, req, nix_hw, blkaddr);
+ if (rq_mask < 0)
+ return NIX_AF_ERR_RQ_CPT_MASK;
+ }
+
+ configure_rq_mask(rvu, blkaddr, nixlf, rq_mask,
+ req->ipsec_cfg1.rq_mask_enable);
+ configure_spb_cpt(rvu, blkaddr, nixlf, req,
+ req->ipsec_cfg1.spb_cpt_enable);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 245e69fcbff9..e5e005d5d71e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -433,6 +433,8 @@
#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
+#define NIX_AF_RX_RQX_MASKX(a, b) (0x4A40 | (a) << 16 | (b) << 3)
+#define NIX_AF_RX_RQX_SETX(a, b) (0x4A80 | (a) << 16 | (b) << 3)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -452,6 +454,10 @@
#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16)
#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16)
+#define NIX_AF_LF_CFG_SHIFT 17
+#define NIX_AF_LF_SSO_PF_FUNC_SHIFT 16
+#define NIX_RQ_MSK_PROFILES 4
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac94cb2ec4..bd37ed3a81ad 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -377,7 +377,9 @@ struct nix_cn10k_rq_ctx_s {
u64 ipsech_ena : 1;
u64 ena_wqwd : 1;
u64 cq : 20;
- u64 rsvd_36_24 : 13;
+ u64 rsvd_34_24 : 11;
+ u64 port_ol4_dis : 1;
+ u64 port_il4_dis : 1;
u64 lenerr_dis : 1;
u64 csum_il4_dis : 1;
u64 csum_ol4_dis : 1;
--
2.43.0
On Fri, May 02, 2025 at 06:49:47PM +0530, Tanmay Jagdale wrote:
> From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
>
> Implement a mailbox message to allocate an rq_mask and apply it to a
> nixlf, so that RQ context fields can be toggled for CPT second pass
> packets.
>
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
> Signed-off-by: Tanmay Jagdale <tanmay@marvell.com>
...
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
> index 7fa98aeb3663..18e2a48e2de1 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
> @@ -544,6 +544,7 @@ void rvu_program_channels(struct rvu *rvu)
>
> void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
> {
> + struct rvu_hwinfo *hw = rvu->hw;
> int blkaddr = nix_hw->blkaddr;
> u64 cfg;
>
> @@ -558,6 +559,16 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
> cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
> cfg |= BIT_ULL(1) | BIT_ULL(2);
As per my comments on an earlier patch in this series:
bits 1 and 2 have meaning. It would be nice to use a #define to
convey this meaning to the reader.
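Something along these lines, say (the names below are only placeholders,
since the meaning of these two bits isn't spelled out in the patch
itself):

/* Hypothetical names -- use whatever the HRM calls these bits */
#define NIX_AF_CFG_FEATURE_A	BIT_ULL(1)
#define NIX_AF_CFG_FEATURE_B	BIT_ULL(2)

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
	cfg |= NIX_AF_CFG_FEATURE_A | NIX_AF_CFG_FEATURE_B;
	rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);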
> rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
> +
> + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
> +
> + if (!(cfg & BIT_ULL(62))) {
> + hw->cap.second_cpt_pass = false;
> + return;
> + }
> +
> + hw->cap.second_cpt_pass = true;
> + nix_hw->rq_msk.total = NIX_RQ_MSK_PROFILES;
> }
>
> void rvu_apr_block_cn10k_init(struct rvu *rvu)
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> index 6bd995c45dad..b15fd331facf 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> @@ -6612,3 +6612,123 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
>
> return ret;
> }
> +
> +static inline void
> +configure_rq_mask(struct rvu *rvu, int blkaddr, int nixlf,
> + u8 rq_mask, bool enable)
> +{
> + u64 cfg, reg;
> +
> + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
> + reg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf));
> + if (enable) {
> + cfg |= BIT_ULL(43);
> + reg = (reg & ~GENMASK_ULL(36, 35)) | ((u64)rq_mask << 35);
> + } else {
> + cfg &= ~BIT_ULL(43);
> + reg = (reg & ~GENMASK_ULL(36, 35));
> + }
Likewise for the bit, mask, and shift here.
And I think that using FIELD_PREP with another mask in place of the shift
is also appropriate here.
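Roughly like the sketch below, with placeholder names for the fields
being touched (FIELD_PREP comes from <linux/bitfield.h>):

/* Hypothetical names for the RQ-mask enable bit and select field */
#define NIX_LF_RX_IPSEC_CFG1_RQ_MASK_ENA	BIT_ULL(43)
#define NIX_LF_CFG_RQ_MASK_SEL			GENMASK_ULL(36, 35)

	if (enable) {
		cfg |= NIX_LF_RX_IPSEC_CFG1_RQ_MASK_ENA;
		reg &= ~NIX_LF_CFG_RQ_MASK_SEL;
		reg |= FIELD_PREP(NIX_LF_CFG_RQ_MASK_SEL, (u64)rq_mask);
	} else {
		cfg &= ~NIX_LF_RX_IPSEC_CFG1_RQ_MASK_ENA;
		reg &= ~NIX_LF_CFG_RQ_MASK_SEL;
	}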
> + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
> + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), reg);
> +}
> +
> +static inline void
> +configure_spb_cpt(struct rvu *rvu, int blkaddr, int nixlf,
> + struct nix_rq_cpt_field_mask_cfg_req *req, bool enable)
> +{
> + u64 cfg;
> +
> + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
> + if (enable) {
> + cfg |= BIT_ULL(37);
> + cfg &= ~GENMASK_ULL(42, 38);
> + cfg |= ((u64)req->ipsec_cfg1.spb_cpt_sizem1 << 38);
> + cfg &= ~GENMASK_ULL(63, 44);
> + cfg |= ((u64)req->ipsec_cfg1.spb_cpt_aura << 44);
> + } else {
> + cfg &= ~BIT_ULL(37);
> + cfg &= ~GENMASK_ULL(42, 38);
> + cfg &= ~GENMASK_ULL(63, 44);
> + }
And here too.
> + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
> +}
...
> +int rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
> + struct nix_rq_cpt_field_mask_cfg_req *req,
> + struct msg_rsp *rsp)
It would be nice to reduce this to 80 columns wide or less.
Perhaps like this?
int
rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
struct nix_rq_cpt_field_mask_cfg_req *req,
struct msg_rsp *rsp)
Or perhaps by renaming nix_rq_cpt_field_mask_cfg_req to be shorter.
...
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
> index 245e69fcbff9..e5e005d5d71e 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
> @@ -433,6 +433,8 @@
> #define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
> #define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
> #define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
> +#define NIX_AF_RX_RQX_MASKX(a, b) (0x4A40 | (a) << 16 | (b) << 3)
> +#define NIX_AF_RX_RQX_SETX(a, b) (0x4A80 | (a) << 16 | (b) << 3)
FIELD_PREP could be used here in conjunction with #defines
for appropriate masks here too.
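For example (the field widths below are only illustrative guesses based
on the shifts used above, not taken from the HRM):

#define NIX_AF_RX_RQX_PROF_SEL	GENMASK_ULL(17, 16)	/* hypothetical */
#define NIX_AF_RX_RQX_WORD_SEL	GENMASK_ULL(5, 3)	/* hypothetical */
#define NIX_AF_RX_RQX_MASKX(a, b)	(0x4A40 | \
		FIELD_PREP(NIX_AF_RX_RQX_PROF_SEL, a) | \
		FIELD_PREP(NIX_AF_RX_RQX_WORD_SEL, b))
#define NIX_AF_RX_RQX_SETX(a, b)	(0x4A80 | \
		FIELD_PREP(NIX_AF_RX_RQX_PROF_SEL, a) | \
		FIELD_PREP(NIX_AF_RX_RQX_WORD_SEL, b))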
>
> #define NIX_PRIV_AF_INT_CFG (0x8000000)
> #define NIX_PRIV_LFX_CFG (0x8000010)
...
Hi Simon,
On 2025-05-07 at 18:06:22, Simon Horman (horms@kernel.org) wrote:
> On Fri, May 02, 2025 at 06:49:47PM +0530, Tanmay Jagdale wrote:
> > From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
> >
> > Implement a mailbox message to allocate an rq_mask and apply it to a
> > nixlf, so that RQ context fields can be toggled for CPT second pass
> > packets.
> >
> > Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
> > Signed-off-by: Tanmay Jagdale <tanmay@marvell.com>
>
> ...
>
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
> > index 7fa98aeb3663..18e2a48e2de1 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
> > @@ -544,6 +544,7 @@ void rvu_program_channels(struct rvu *rvu)
> >
> > void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
> > {
> > + struct rvu_hwinfo *hw = rvu->hw;
> > int blkaddr = nix_hw->blkaddr;
> > u64 cfg;
> >
> > @@ -558,6 +559,16 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
> > cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
> > cfg |= BIT_ULL(1) | BIT_ULL(2);
>
> As per my comments on an earlier patch in this series:
> bits 1 and 2 have meaning. It would be nice to use a #define to
> convey this meaning to the reader.
Okay sure, I will update the patch series with macros that provide a
clear meaning.
>
> > rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
> > +
> > + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
> > +
> > + if (!(cfg & BIT_ULL(62))) {
> > + hw->cap.second_cpt_pass = false;
> > + return;
> > + }
> > +
> > + hw->cap.second_cpt_pass = true;
> > + nix_hw->rq_msk.total = NIX_RQ_MSK_PROFILES;
> > }
> >
> > void rvu_apr_block_cn10k_init(struct rvu *rvu)
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> > index 6bd995c45dad..b15fd331facf 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
> > @@ -6612,3 +6612,123 @@ int rvu_mbox_handler_nix_mcast_grp_update(struct rvu *rvu,
> >
> > return ret;
> > }
> > +
> > +static inline void
> > +configure_rq_mask(struct rvu *rvu, int blkaddr, int nixlf,
> > + u8 rq_mask, bool enable)
> > +{
> > + u64 cfg, reg;
> > +
> > + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
> > + reg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf));
> > + if (enable) {
> > + cfg |= BIT_ULL(43);
> > + reg = (reg & ~GENMASK_ULL(36, 35)) | ((u64)rq_mask << 35);
> > + } else {
> > + cfg &= ~BIT_ULL(43);
> > + reg = (reg & ~GENMASK_ULL(36, 35));
> > + }
>
> Likewise for the bit, mask, and shift here.
>
> And I think that using FIELD_PREP with another mask in place of the shift
> is also appropriate here.
ACK.
>
> > + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
> > + rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), reg);
> > +}
> > +
> > +static inline void
> > +configure_spb_cpt(struct rvu *rvu, int blkaddr, int nixlf,
> > + struct nix_rq_cpt_field_mask_cfg_req *req, bool enable)
> > +{
> > + u64 cfg;
> > +
> > + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf));
> > + if (enable) {
> > + cfg |= BIT_ULL(37);
> > + cfg &= ~GENMASK_ULL(42, 38);
> > + cfg |= ((u64)req->ipsec_cfg1.spb_cpt_sizem1 << 38);
> > + cfg &= ~GENMASK_ULL(63, 44);
> > + cfg |= ((u64)req->ipsec_cfg1.spb_cpt_aura << 44);
> > + } else {
> > + cfg &= ~BIT_ULL(37);
> > + cfg &= ~GENMASK_ULL(42, 38);
> > + cfg &= ~GENMASK_ULL(63, 44);
> > + }
>
> And here too.
>
> > + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(nixlf), cfg);
> > +}
>
> ...
>
> > +int rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
> > + struct nix_rq_cpt_field_mask_cfg_req *req,
> > + struct msg_rsp *rsp)
>
> It would be nice to reduce this to 80 columns wide or less.
> Perhaps like this?
>
> int
> rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
> struct nix_rq_cpt_field_mask_cfg_req *req,
> struct msg_rsp *rsp)
>
> Or perhaps by renaming nix_rq_cpt_field_mask_cfg_req to be shorter.
Okay sure. I'll go ahead with the first suggestion so that the function
name is in sync with the rest of the file.
>
> ...
>
> > diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
> > index 245e69fcbff9..e5e005d5d71e 100644
> > --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
> > +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
> > @@ -433,6 +433,8 @@
> > #define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
> > #define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
> > #define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
> > +#define NIX_AF_RX_RQX_MASKX(a, b) (0x4A40 | (a) << 16 | (b) << 3)
> > +#define NIX_AF_RX_RQX_SETX(a, b) (0x4A80 | (a) << 16 | (b) << 3)
>
> FIELD_PREP could be used here in conjunction with #defines
> for appropriate masks here too.
ACK.
>
> >
> > #define NIX_PRIV_AF_INT_CFG (0x8000000)
> > #define NIX_PRIV_LFX_CFG (0x8000010)
>
> ...
Thanks,
Tanmay
Hi Tanmay,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/Tanmay-Jagdale/crypto-octeontx2-Share-engine-group-info-with-AF-driver/20250502-213203
base: net-next/main
patch link: https://lore.kernel.org/r/20250502132005.611698-7-tanmay%40marvell.com
patch subject: [net-next PATCH v1 06/15] octeontx2-af: Add support for CPT second pass
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20250507/202505071511.neU9Siwr-lkp@intel.com/config)
compiler: clang version 20.1.2 (https://github.com/llvm/llvm-project 58df0ef89dd64126512e4ee27b4ac3fd8ddf6247)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250507/202505071511.neU9Siwr-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202505071511.neU9Siwr-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c:6723:6: warning: variable 'rq_mask' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
6723 | if (req->ipsec_cfg1.rq_mask_enable) {
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c:6729:41: note: uninitialized use occurs here
6729 | configure_rq_mask(rvu, blkaddr, nixlf, rq_mask,
| ^~~~~~~
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c:6723:2: note: remove the 'if' if its condition is always true
6723 | if (req->ipsec_cfg1.rq_mask_enable) {
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c:6710:13: note: initialize the variable 'rq_mask' to silence this warning
6710 | int rq_mask, err;
| ^
| = 0
1 warning generated.
vim +6723 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6702
6703 int rvu_mbox_handler_nix_lf_inline_rq_cfg(struct rvu *rvu,
6704 struct nix_rq_cpt_field_mask_cfg_req *req,
6705 struct msg_rsp *rsp)
6706 {
6707 struct rvu_hwinfo *hw = rvu->hw;
6708 struct nix_hw *nix_hw;
6709 int blkaddr, nixlf;
6710 int rq_mask, err;
6711
6712 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
6713 if (err)
6714 return err;
6715
6716 nix_hw = get_nix_hw(rvu->hw, blkaddr);
6717 if (!nix_hw)
6718 return NIX_AF_ERR_INVALID_NIXBLK;
6719
6720 if (!hw->cap.second_cpt_pass)
6721 return NIX_AF_ERR_INVALID_NIXBLK;
6722
> 6723 if (req->ipsec_cfg1.rq_mask_enable) {
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
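A minimal way to resolve the warning, assuming the current structure of
the handler is kept, is to give rq_mask a defined default. Since
configure_rq_mask() ignores rq_mask when its enable argument is false,
the zero is only there so the disable path passes an initialized value;
the authors may of course prefer to restructure instead:

	int rq_mask = 0;	/* silences -Wsometimes-uninitialized */
	int err;
	...
	if (req->ipsec_cfg1.rq_mask_enable) {
		rq_mask = nix_inline_rq_mask_alloc(rvu, req, nix_hw, blkaddr);
		if (rq_mask < 0)
			return NIX_AF_ERR_RQ_CPT_MASK;
	}

	configure_rq_mask(rvu, blkaddr, nixlf, rq_mask,
			  req->ipsec_cfg1.rq_mask_enable);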