From: Clark Wang <xiaoning.wang@nxp.com>
Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
enetc_alloc_msix() so that the code is more concise and readable. In
addition, and slightly different from before, the dynamically allocated
memory is now managed with the scope-based cleanup helper (__free()),
so the error paths no longer need explicit kfree() calls.
Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
v2 changes:
This patch is split out of v1 patch 9 ("net: enetc: optimize the
allocation of tx_bdr"): enetc_int_vector_init() is extracted from the
original patch, and the new helper function enetc_int_vector_destroy()
is added.
v3 changes:
1. Mention in the commit message that enetc_int_vector_init() uses the
cleanup helper.
2. Fix the uninitialized 'err' issue when enetc_int_vector_init()
returns an error.
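
For reviewers unfamiliar with the cleanup helper mentioned above, a
minimal sketch of the pattern follows (generic code, not taken from the
driver; struct foo and setup_foo() are made-up names): the allocation is
tagged with __free(kfree) so it is freed automatically on every early
return, and no_free_ptr() hands ownership back on success.

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_alloc(struct foo **out, size_t size)
{
	/* freed automatically if we leave the scope before no_free_ptr() */
	struct foo *f __free(kfree) = kzalloc(size, GFP_KERNEL);
	int err;

	if (!f)
		return -ENOMEM;

	err = setup_foo(f);		/* hypothetical init step */
	if (err)
		return err;		/* f is kfree()'d by the cleanup helper */

	*out = no_free_ptr(f);		/* success: disarm cleanup, keep f */
	return 0;
}
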
---
drivers/net/ethernet/freescale/enetc/enetc.c | 172 ++++++++++---------
1 file changed, 87 insertions(+), 85 deletions(-)
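
Note: the diff below keeps the existing default tx ring mapping policy,
idx = priv->bdr_int_num * j + i, which interleaves the tx rings across
the interrupt vectors. A standalone sketch of the resulting mapping
(plain userspace C with example counts, not driver code):

#include <stdio.h>

int main(void)
{
	int bdr_int_num = 2;	/* example: 2 interrupt vectors */
	int v_tx_rings = 4;	/* example: 4 tx rings per vector */

	for (int i = 0; i < bdr_int_num; i++) {
		printf("vector %d:", i);
		for (int j = 0; j < v_tx_rings; j++)
			printf(" tx_ring[%d]", bdr_int_num * j + i);
		printf("\n");
	}
	/*
	 * Prints:
	 *   vector 0: tx_ring[0] tx_ring[2] tx_ring[4] tx_ring[6]
	 *   vector 1: tx_ring[1] tx_ring[3] tx_ring[5] tx_ring[7]
	 */
	return 0;
}
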
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 032d8eadd003..bd725561b8a2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2965,6 +2965,87 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
}
EXPORT_SYMBOL_GPL(enetc_ioctl);
+static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ int v_tx_rings)
+{
+ struct enetc_int_vector *v __free(kfree);
+ struct enetc_bdr *bdr;
+ int j, err;
+
+ v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ bdr = &v->rx_ring;
+ bdr->index = i;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->rx_bd_count;
+ bdr->buffer_offset = ENETC_RXB_PAD;
+ priv->rx_ring[i] = bdr;
+
+ err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ if (err)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err) {
+ xdp_rxq_info_unreg(&bdr->xdp.rxq);
+ return err;
+ }
+
+ /* init defaults for adaptive IC */
+ if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+ v->rx_ictt = 0x1;
+ v->rx_dim_en = true;
+ }
+
+ INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+ v->count_tx_rings = v_tx_rings;
+
+ for (j = 0; j < v_tx_rings; j++) {
+ int idx;
+
+ /* default tx ring mapping policy */
+ idx = priv->bdr_int_num * j + i;
+ __set_bit(idx, &v->tx_rings_map);
+ bdr = &v->tx_ring[j];
+ bdr->index = idx;
+ bdr->ndev = priv->ndev;
+ bdr->dev = priv->dev;
+ bdr->bd_count = priv->tx_bd_count;
+ priv->tx_ring[idx] = bdr;
+ }
+
+ priv->int_vector[i] = no_free_ptr(v);
+
+ return 0;
+}
+
+static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
+{
+ struct enetc_int_vector *v = priv->int_vector[i];
+ struct enetc_bdr *rx_ring = &v->rx_ring;
+ int j, tx_ring_index;
+
+ xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+ netif_napi_del(&v->napi);
+ cancel_work_sync(&v->rx_dim.work);
+
+ priv->rx_ring[i] = NULL;
+
+ for (j = 0; j < v->count_tx_rings; j++) {
+ tx_ring_index = priv->bdr_int_num * j + i;
+ priv->tx_ring[tx_ring_index] = NULL;
+ }
+
+ kfree(v);
+ priv->int_vector[i] = NULL;
+}
+
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
@@ -2987,62 +3068,9 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v;
- struct enetc_bdr *bdr;
- int j;
-
- v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
- if (!v) {
- err = -ENOMEM;
- goto fail;
- }
-
- priv->int_vector[i] = v;
-
- bdr = &v->rx_ring;
- bdr->index = i;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->rx_bd_count;
- bdr->buffer_offset = ENETC_RXB_PAD;
- priv->rx_ring[i] = bdr;
-
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
- if (err) {
- kfree(v);
- goto fail;
- }
-
- err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
- if (err) {
- xdp_rxq_info_unreg(&bdr->xdp.rxq);
- kfree(v);
+ err = enetc_int_vector_init(priv, i, v_tx_rings);
+ if (err)
goto fail;
- }
-
- /* init defaults for adaptive IC */
- if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
- v->rx_ictt = 0x1;
- v->rx_dim_en = true;
- }
- INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll);
- v->count_tx_rings = v_tx_rings;
-
- for (j = 0; j < v_tx_rings; j++) {
- int idx;
-
- /* default tx ring mapping policy */
- idx = priv->bdr_int_num * j + i;
- __set_bit(idx, &v->tx_rings_map);
- bdr = &v->tx_ring[j];
- bdr->index = idx;
- bdr->ndev = priv->ndev;
- bdr->dev = priv->dev;
- bdr->bd_count = priv->tx_bd_count;
- priv->tx_ring[idx] = bdr;
- }
}
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
@@ -3062,16 +3090,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
return 0;
fail:
- while (i--) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- kfree(v);
- }
+ while (i--)
+ enetc_int_vector_destroy(priv, i);
pci_free_irq_vectors(pdev);
@@ -3083,26 +3103,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
{
int i;
- for (i = 0; i < priv->bdr_int_num; i++) {
- struct enetc_int_vector *v = priv->int_vector[i];
- struct enetc_bdr *rx_ring = &v->rx_ring;
-
- xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
- xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
- netif_napi_del(&v->napi);
- cancel_work_sync(&v->rx_dim.work);
- }
-
- for (i = 0; i < priv->num_rx_rings; i++)
- priv->rx_ring[i] = NULL;
-
- for (i = 0; i < priv->num_tx_rings; i++)
- priv->tx_ring[i] = NULL;
-
- for (i = 0; i < priv->bdr_int_num; i++) {
- kfree(priv->int_vector[i]);
- priv->int_vector[i] = NULL;
- }
+ for (i = 0; i < priv->bdr_int_num; i++)
+ enetc_int_vector_destroy(priv, i);
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
--
2.34.1
On Thu, Oct 17, 2024 at 03:46:34PM +0800, Wei Fang wrote:
> From: Clark Wang <xiaoning.wang@nxp.com>
>
> Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
> enetc_alloc_msix() so that the code is more concise and readable. In
> addition, and slightly different from before, the dynamically allocated
> memory is now managed with the scope-based cleanup helper (__free()),
> so the error paths no longer need explicit kfree() calls.
>
> Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
Reviewed-by: Frank Li <Frank.Li@nxp.com>