[net-next PatchV2] octeontx2-af: map management port always to first PF
Posted by Hariprasad Kelam 1 year, 10 months ago
The user can enable or disable any MAC block, or just a few ports of a
block. As a result, the management port's interface name varies with the
user's setup if it is not mapped to the first PF.

The management port mapping is now configured to always connect to the
first PF. This patch implements that change.

Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
---
v2: * Refactor code to avoid duplication.
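
For reference, a minimal stand-alone sketch of the two-pass mapping order
introduced here: the mapping helper is run once to place the management
port on the first PF, then again for every remaining port. The names
map_ports/struct port below are illustrative only and not part of the
driver.

#include <stdbool.h>
#include <stdio.h>

#define NUM_PORTS 4

struct port { int cgx; int lmac; bool mgmt; };

/* Assign ports to PFs in order; when want_mgmt is true, stop after the
 * (single) management port so it always lands on the lowest PF number.
 */
static void map_ports(const struct port *ports, int n, int *pf, bool want_mgmt)
{
	for (int i = 0; i < n; i++) {
		if (ports[i].mgmt != want_mgmt)
			continue;
		printf("PF%d <- cgx%d lmac%d%s\n", (*pf)++,
		       ports[i].cgx, ports[i].lmac,
		       ports[i].mgmt ? " (mgmt)" : "");
		if (want_mgmt)
			return;
	}
}

int main(void)
{
	const struct port ports[NUM_PORTS] = {
		{ 0, 0, false }, { 0, 1, false }, { 1, 0, true }, { 1, 1, false },
	};
	int pf = 0;

	map_ports(ports, NUM_PORTS, &pf, true);  /* management port first */
	map_ports(ports, NUM_PORTS, &pf, false); /* then the rest */
	return 0;
}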

 .../net/ethernet/marvell/octeontx2/af/mbox.h  |  5 +-
 .../ethernet/marvell/octeontx2/af/rvu_cgx.c   | 84 +++++++++++++------
 2 files changed, 63 insertions(+), 26 deletions(-)
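
A note on the mbox.h hunk: the two new u32 fields share one 64-bit word
and the mgmt_port bitfield is allocated a second one, so the reserved
array shrinks by exactly two entries (1021 -> 1019) and the overall size
of the firmware-shared structure is unchanged. A stand-alone sketch of
that accounting (using stdint.h types instead of the kernel's, and
assuming natural 64-bit alignment):

#include <stdint.h>

/* 2 x u32 fill one 64-bit slot, the 1-bit u64 bitfield takes another,
 * so exactly two reserved u64 words are consumed.
 */
_Static_assert(2 * sizeof(uint32_t) + sizeof(uint64_t) ==
	       (1021 - 1019) * sizeof(uint64_t),
	       "new fwdata fields must exactly consume the freed reserved words");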

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4a77f6fe2622..88cced83bf23 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -639,7 +639,10 @@ struct cgx_lmac_fwdata_s {
 	/* Only applicable if SFP/QSFP slot is present */
 	struct sfp_eeprom_s sfp_eeprom;
 	struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+	u32 lmac_type;
+	u32 portm_idx;
+	u64 mgmt_port:1;
+#define LMAC_FWDATA_RESERVED_MEM 1019
 	u64 reserved[LMAC_FWDATA_RESERVED_MEM];
 };

diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 266ecbc1b97a..8cc17d7e368d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -118,15 +118,67 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
 		pfvf->nix_blkaddr = BLKADDR_NIX1;
 }

-static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+static bool rvu_cgx_is_mgmt_port(struct rvu *rvu, int cgx_id, int lmac_id)
+{
+	struct cgx_lmac_fwdata_s *fwdata;
+
+	fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+	return !!fwdata->mgmt_port;
+}
+
+static void __rvu_map_cgx_lmac_pf(struct rvu *rvu, unsigned int pf,
+				  int cgx, int lmac)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
-	int cgx_cnt_max = rvu->cgx_cnt_max;
-	int pf = PF_CGXMAP_BASE;
+	int numvfs, hwvfs;
+	int free_pkind;
+
+	rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
+	rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
+	free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
+	pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+	rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
+	rvu->cgx_mapped_pfs++;
+	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
+	rvu->cgx_mapped_vfs += numvfs;
+}
+
+static void rvu_cgx_map_mgmt_port(struct rvu *rvu, int cgx_cnt_max,
+				  unsigned int *pf, bool req_map_mgmt)
+{
 	unsigned long lmac_bmap;
-	int size, free_pkind;
 	int cgx, lmac, iter;
-	int numvfs, hwvfs;
+
+	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+		if (!rvu_cgx_pdata(cgx, rvu))
+			continue;
+		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+			if (iter >= MAX_LMAC_COUNT)
+				continue;
+			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter);
+			/* Map management port always to first PF */
+			if (req_map_mgmt &&
+			    rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
+				__rvu_map_cgx_lmac_pf(rvu, *pf, cgx, lmac);
+				(*pf)++;
+				return;
+			}
+			/* Non management port mapping */
+			if (!req_map_mgmt &&
+			    !rvu_cgx_is_mgmt_port(rvu, cgx, lmac)) {
+				__rvu_map_cgx_lmac_pf(rvu, *pf, cgx, lmac);
+				(*pf)++;
+			}
+		}
+	}
+}
+
+static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
+{
+	int cgx_cnt_max = rvu->cgx_cnt_max;
+	unsigned int pf = PF_CGXMAP_BASE;
+	int size;

 	if (!cgx_cnt_max)
 		return 0;
@@ -155,26 +207,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
 		return -ENOMEM;

 	rvu->cgx_mapped_pfs = 0;
-	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
-		if (!rvu_cgx_pdata(cgx, rvu))
-			continue;
-		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
-			if (iter >= MAX_LMAC_COUNT)
-				continue;
-			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
-					      iter);
-			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
-			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
-			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
-			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
-			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
-			rvu->cgx_mapped_pfs++;
-			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
-			rvu->cgx_mapped_vfs += numvfs;
-			pf++;
-		}
-	}
+	rvu_cgx_map_mgmt_port(rvu, cgx_cnt_max, &pf, true);
+	rvu_cgx_map_mgmt_port(rvu, cgx_cnt_max, &pf, false);
 	return 0;
 }

--
2.17.1
Re: [net-next PatchV2] octeontx2-af: map management port always to first PF
Posted by Jakub Kicinski 1 year, 10 months ago
On Wed, 10 Apr 2024 18:55:38 +0530 Hariprasad Kelam wrote:
> The user can enable or disable any MAC block, or just a few ports of a
> block. As a result, the management port's interface name varies with the
> user's setup if it is not mapped to the first PF.

There is no concept of management port in Linux networking.
I may be missing the point, but I'm unable to review this in 
the context of the upstream Linux kernel.
Re: [net-next PatchV2] octeontx2-af: map management port always to first PF
Posted by Jacob Keller 1 year, 10 months ago

On 4/10/2024 6:25 AM, Hariprasad Kelam wrote:
> The user can enable or disable any MAC block, or just a few ports of a
> block. As a result, the management port's interface name varies with the
> user's setup if it is not mapped to the first PF.
> 
> The management port mapping is now configured to always connect to the
> first PF. This patch implements that change.
> 

nit: it's generally preferred to avoid "this patch.." language in commit
messages, as it is clear from the context.

That being said, it's not worth a re-roll just to fix this.

Reviewed-by: Jacob Keller <jacob.keller@intel.com>