Allocate CP and NQ related data structures and add support to
associate NQ and CQ rings. Also associate each NQ with its NAPI
instance and MSI-X interrupt.
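
The resulting per-vector hierarchy looks like this (illustrative,
single TC):

  bnge_napi
    nq_ring (bnge_nq_ring_info, one NQ per MSI-X vector)
      cp_ring_arr[0]: BNGE_NQ_HDL_TYPE_RX -> rx_ring->rx_cpr
      cp_ring_arr[1]: BNGE_NQ_HDL_TYPE_TX -> tx_ring->tx_cpr
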
Signed-off-by: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Reviewed-by: Vikas Gupta <vikas.gupta@broadcom.com>
Reviewed-by: Rajashekar Hudumula <rajashekar.hudumula@broadcom.com>
---
drivers/net/ethernet/broadcom/bnge/bnge.h | 1 +
.../net/ethernet/broadcom/bnge/bnge_netdev.c | 439 ++++++++++++++++++
.../net/ethernet/broadcom/bnge/bnge_netdev.h | 12 +
.../net/ethernet/broadcom/bnge/bnge_resc.c | 2 +-
4 files changed, 453 insertions(+), 1 deletion(-)
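
A minimal sketch of how a later NAPI poll handler might walk the new
NQ -> CQ association (illustrative only; the real poll logic lands in
a follow-up patch):

	struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
	int i;

	for (i = 0; i < nqr->cp_ring_count; i++) {
		struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];

		if (cpr->cp_ring_type == BNGE_NQ_HDL_TYPE_RX)
			; /* poll RX completions */
		else
			; /* poll TX completions */
	}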
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 03e55b931f7..c536c0cc66e 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -215,5 +215,6 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
}
bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
#endif /* _BNGE_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index b537c66381d..207cbaba08a 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -27,6 +27,237 @@
#define BNGE_RING_TO_TC(bd, tx) \
((tx) / (bd)->tx_nr_rings_per_tc)
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
+
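+/* ring_mem.pg_arr/dma_arr alias desc_ring/desc_mapping, so clear the
+ * aliases once the backing arrays are freed.
+ */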
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ goto err_free_nq_arrays;
+ }
+ return 0;
+
+err_free_nq_arrays:
+ bnge_free_nq_arrays(bn);
+ return rc;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc)
+		return rc;
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
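+	/* The ring_mem page/DMA arrays alias the CP descriptor arrays
+	 * allocated above, so the pages allocated by bnge_alloc_ring()
+	 * land directly in cpr->desc_ring/desc_mapping.
+	 */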
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc)
+ goto err_free_cp_desc_arr;
+ return rc;
+
+err_free_cp_desc_arr:
+ bnge_free_cp_desc_arr(cpr);
+ return rc;
+}
+
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ int i, j, ulp_msix, rc = -ENOMEM;
+ struct bnge_dev *bd = bn->bd;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ if (!bnapi)
+ continue;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_nq_tree;
+
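+		/* NQ vectors sit after any MSI-X vectors reserved for
+		 * the RoCE auxiliary device.
+		 */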
+ ring->map_idx = ulp_msix + i;
+
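+		/* Count the completion rings this NQ carries: one for RX
+		 * if it services an RX queue, plus one per TC for TX.
+		 */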
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+		nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+					   GFP_KERNEL);
+		if (!nqr->cp_ring_arr) {
+			rc = -ENOMEM;
+			goto err_free_nq_tree;
+		}
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ goto err_free_nq_tree;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+ return 0;
+
+err_free_nq_tree:
+ bnge_free_nq_tree(bn);
+ return rc;
+}
+
static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
{
return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
@@ -221,10 +452,19 @@ static int bnge_alloc_tx_rings(struct bnge_net *bn)
return rc;
}
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
static void bnge_free_core(struct bnge_net *bn)
{
bnge_free_tx_rings(bn);
bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_grps(bn);
kfree(bn->tx_ring_map);
bn->tx_ring_map = NULL;
kfree(bn->tx_ring);
@@ -311,6 +551,10 @@ static int bnge_alloc_core(struct bnge_net *bn)
txr->bnapi = bnapi2;
}
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
bnge_init_ring_struct(bn);
rc = bnge_alloc_rx_rings(bn);
@@ -318,6 +562,10 @@ static int bnge_alloc_core(struct bnge_net *bn)
goto err_free_core;
rc = bnge_alloc_tx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_tree(bn);
if (rc)
goto err_free_core;
return 0;
@@ -327,6 +575,181 @@ static int bnge_alloc_core(struct bnge_net *bn)
return rc;
}
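+/* Map an NQ index to the MSI-X slot recorded in its ring's map_idx */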
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
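+		/* e.g. "eth0-TxRx-0" when the vector is shared */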
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ if (!bd->irq_tbl) {
+ if (bnge_alloc_irqs(bd))
+ return -ENODEV;
+ }
+
+ bnge_setup_msix(bn);
+ rc = netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
+ if (rc)
+ goto err_free_irqs;
+ return rc;
+
+err_free_irqs:
+ bnge_free_irqs(bd);
+ return rc;
+}
+
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ break;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
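+		/* Hint each vector's affinity to a CPU local to the
+		 * device's NUMA node.
+		 */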
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+	/* Defer the NAPI implementation to a later patch in the series */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+ bnge_napi_poll, bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
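+	/* Unlink queues from their NAPI instances before deleting them */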
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del_locked(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ if (!bd->irq_tbl || !bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
static int bnge_open_core(struct bnge_net *bn)
{
struct bnge_dev *bd = bn->bd;
@@ -346,8 +769,20 @@ static int bnge_open_core(struct bnge_net *bn)
return rc;
}
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
set_bit(BNGE_STATE_OPEN, &bd->state);
return 0;
+
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+ return rc;
}
static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -374,6 +809,9 @@ static void bnge_close_core(struct bnge_net *bn)
struct bnge_dev *bd = bn->bd;
clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
bnge_free_core(bn);
}
@@ -596,6 +1034,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bnge_init_l2_fltr_tbl(bn);
bnge_init_mac_addr(bd);
+ netdev->request_ops_lock = true;
rc = register_netdev(netdev);
if (rc) {
dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index 92bae665f59..8041951da18 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -133,6 +133,9 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
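+/* Completion ring types hung off an NQ */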
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -172,6 +175,10 @@ struct bnge_net {
u16 *tx_ring_map;
enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ int total_irqs;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -223,6 +230,8 @@ struct bnge_cp_ring_info {
dma_addr_t *desc_mapping;
struct tx_cmp **desc_ring;
struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
};
struct bnge_nq_ring_info {
@@ -230,6 +239,9 @@ struct bnge_nq_ring_info {
dma_addr_t *desc_mapping;
struct nqe_cn **desc_ring;
struct bnge_ring_struct ring_struct;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
};
struct bnge_rx_ring_info {
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
index c79a3607a1b..5597af1b3b7 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
return min_t(int, roce_msix, num_online_cpus() + 1);
}
-static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
if (bnge_is_roce_en(bd))
return bd->aux_num_msix;
--
2.47.3