[PATCH net-next 3/3] Revert "bnxt_en: bring back rtnl_lock() in the bnxt_open() path"

Stanislav Fomichev posted 3 patches 7 months ago
There is a newer version of this series
[PATCH net-next 3/3] Revert "bnxt_en: bring back rtnl_lock() in the bnxt_open() path"
Posted by Stanislav Fomichev 7 months ago
This reverts commit 325eb217e41fa14f307c7cc702bd18d0bb38fe84.

udp_tunnel infra doesn't need RTNL, should be safe to get back
to only netdev instance lock.

Cc: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Stanislav Fomichev <stfomichev@gmail.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36 +++++------------------
 1 file changed, 7 insertions(+), 29 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a3dadde65b8d..1da208c36572 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -14055,28 +14055,13 @@ static void bnxt_unlock_sp(struct bnxt *bp)
 	netdev_unlock(bp->dev);
 }
 
-/* Same as bnxt_lock_sp() with additional rtnl_lock */
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
-{
-	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-	rtnl_lock();
-	netdev_lock(bp->dev);
-}
-
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
-{
-	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-	netdev_unlock(bp->dev);
-	rtnl_unlock();
-}
-
 /* Only called from bnxt_sp_task() */
 static void bnxt_reset(struct bnxt *bp, bool silent)
 {
-	bnxt_rtnl_lock_sp(bp);
+	bnxt_lock_sp(bp);
 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
 		bnxt_reset_task(bp, silent);
-	bnxt_rtnl_unlock_sp(bp);
+	bnxt_unlock_sp(bp);
 }
 
 /* Only called from bnxt_sp_task() */
@@ -14084,9 +14069,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 {
 	int i;
 
-	bnxt_rtnl_lock_sp(bp);
+	bnxt_lock_sp(bp);
 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
-		bnxt_rtnl_unlock_sp(bp);
+		bnxt_unlock_sp(bp);
 		return;
 	}
 	/* Disable and flush TPA before resetting the RX ring */
@@ -14125,7 +14110,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 	}
 	if (bp->flags & BNXT_FLAG_TPA)
 		bnxt_set_tpa(bp, true);
-	bnxt_rtnl_unlock_sp(bp);
+	bnxt_unlock_sp(bp);
 }
 
 static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -15017,17 +15002,15 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
 		fallthrough;
 	case BNXT_FW_RESET_STATE_OPENING:
-		while (!rtnl_trylock()) {
+		while (!netdev_trylock(bp->dev)) {
 			bnxt_queue_fw_reset_work(bp, HZ / 10);
 			return;
 		}
-		netdev_lock(bp->dev);
 		rc = bnxt_open(bp->dev);
 		if (rc) {
 			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
 			bnxt_fw_reset_abort(bp, rc);
 			netdev_unlock(bp->dev);
-			rtnl_unlock();
 			goto ulp_start;
 		}
 
@@ -15047,7 +15030,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 			bnxt_dl_health_fw_status_update(bp, true);
 		}
 		netdev_unlock(bp->dev);
-		rtnl_unlock();
 		bnxt_ulp_start(bp, 0);
 		bnxt_reenable_sriov(bp);
 		netdev_lock(bp->dev);
@@ -15996,7 +15978,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 		   rc);
 	napi_enable_locked(&bnapi->napi);
 	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
-	netif_close(dev);
+	bnxt_reset_task(bp, true);
 	return rc;
 }
 
@@ -16812,7 +16794,6 @@ static int bnxt_resume(struct device *device)
 	struct bnxt *bp = netdev_priv(dev);
 	int rc = 0;
 
-	rtnl_lock();
 	netdev_lock(dev);
 	rc = pci_enable_device(bp->pdev);
 	if (rc) {
@@ -16857,7 +16838,6 @@ static int bnxt_resume(struct device *device)
 
 resume_exit:
 	netdev_unlock(bp->dev);
-	rtnl_unlock();
 	bnxt_ulp_start(bp, rc);
 	if (!rc)
 		bnxt_reenable_sriov(bp);
@@ -17023,7 +17003,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
 	int err;
 
 	netdev_info(bp->dev, "PCI Slot Resume\n");
-	rtnl_lock();
 	netdev_lock(netdev);
 
 	err = bnxt_hwrm_func_qcaps(bp);
@@ -17041,7 +17020,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
 		netif_device_attach(netdev);
 
 	netdev_unlock(netdev);
-	rtnl_unlock();
 	bnxt_ulp_start(bp, err);
 	if (!err)
 		bnxt_reenable_sriov(bp);
-- 
2.49.0
RE: [Intel-wired-lan] [PATCH net-next 3/3] Revert "bnxt_en: bring back rtnl_lock() in the bnxt_open() path"
Posted by Loktionov, Aleksandr 7 months ago

> -----Original Message-----
> From: Intel-wired-lan <intel-wired-lan-bounces@osuosl.org> On Behalf
> Of Stanislav Fomichev
> Sent: Tuesday, May 20, 2025 10:36 PM
> To: netdev@vger.kernel.org
> Cc: davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; skalluru@marvell.com; manishc@marvell.com;
> andrew+netdev@lunn.ch; michael.chan@broadcom.com;
> pavan.chebbi@broadcom.com; ajit.khaparde@broadcom.com;
> sriharsha.basavapatna@broadcom.com; somnath.kotur@broadcom.com;
> Nguyen, Anthony L <anthony.l.nguyen@intel.com>; Kitszel, Przemyslaw
> <przemyslaw.kitszel@intel.com>; tariqt@nvidia.com; saeedm@nvidia.com;
> louis.peens@corigine.com; shshaikh@marvell.com; GR-Linux-NIC-
> Dev@marvell.com; ecree.xilinx@gmail.com; horms@kernel.org;
> dsahern@kernel.org; ruanjinjie@huawei.com; mheib@redhat.com;
> stfomichev@gmail.com; linux-kernel@vger.kernel.org; intel-wired-
> lan@lists.osuosl.org; linux-rdma@vger.kernel.org; oss-
> drivers@corigine.com; linux-net-drivers@amd.com; leon@kernel.org
> Subject: [Intel-wired-lan] [PATCH net-next 3/3] Revert "bnxt_en: bring
> back rtnl_lock() in the bnxt_open() path"
> 
> This reverts commit 325eb217e41fa14f307c7cc702bd18d0bb38fe84.
> 
> udp_tunnel infra doesn't need RTNL, should be safe to get back to only
> netdev instance lock.
> 
> Cc: Michael Chan <michael.chan@broadcom.com>
> Signed-off-by: Stanislav Fomichev <stfomichev@gmail.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>

> ---
>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36 +++++------------------
>  1 file changed, 7 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index a3dadde65b8d..1da208c36572 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -14055,28 +14055,13 @@ static void bnxt_unlock_sp(struct bnxt *bp)
>  	netdev_unlock(bp->dev);
>  }
> 
> -/* Same as bnxt_lock_sp() with additional rtnl_lock */
> -static void bnxt_rtnl_lock_sp(struct bnxt *bp)
> -{
> -	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
> -	rtnl_lock();
> -	netdev_lock(bp->dev);
> -}
> -
> -static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
> -{
> -	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
> -	netdev_unlock(bp->dev);
> -	rtnl_unlock();
> -}
> -
>  /* Only called from bnxt_sp_task() */
>  static void bnxt_reset(struct bnxt *bp, bool silent)
>  {
> -	bnxt_rtnl_lock_sp(bp);
> +	bnxt_lock_sp(bp);
>  	if (test_bit(BNXT_STATE_OPEN, &bp->state))
>  		bnxt_reset_task(bp, silent);
> -	bnxt_rtnl_unlock_sp(bp);
> +	bnxt_unlock_sp(bp);
>  }
> 
>  /* Only called from bnxt_sp_task() */
> @@ -14084,9 +14069,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
>  {
>  	int i;
> 
> -	bnxt_rtnl_lock_sp(bp);
> +	bnxt_lock_sp(bp);
>  	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
> -		bnxt_rtnl_unlock_sp(bp);
> +		bnxt_unlock_sp(bp);
>  		return;
>  	}
>  	/* Disable and flush TPA before resetting the RX ring */
> @@ -14125,7 +14110,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
>  	}
>  	if (bp->flags & BNXT_FLAG_TPA)
>  		bnxt_set_tpa(bp, true);
> -	bnxt_rtnl_unlock_sp(bp);
> +	bnxt_unlock_sp(bp);
>  }
> 
>  static void bnxt_fw_fatal_close(struct bnxt *bp)
> @@ -15017,17 +15002,15 @@ static void bnxt_fw_reset_task(struct work_struct *work)
>  		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
>  		fallthrough;
>  	case BNXT_FW_RESET_STATE_OPENING:
> -		while (!rtnl_trylock()) {
> +		while (!netdev_trylock(bp->dev)) {
>  			bnxt_queue_fw_reset_work(bp, HZ / 10);
>  			return;
>  		}
> -		netdev_lock(bp->dev);
>  		rc = bnxt_open(bp->dev);
>  		if (rc) {
>  			netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
>  			bnxt_fw_reset_abort(bp, rc);
>  			netdev_unlock(bp->dev);
> -			rtnl_unlock();
>  			goto ulp_start;
>  		}
> 
> @@ -15047,7 +15030,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
>  			bnxt_dl_health_fw_status_update(bp, true);
>  		}
>  		netdev_unlock(bp->dev);
> -		rtnl_unlock();
>  		bnxt_ulp_start(bp, 0);
>  		bnxt_reenable_sriov(bp);
>  		netdev_lock(bp->dev);
> @@ -15996,7 +15978,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
>  		   rc);
>  	napi_enable_locked(&bnapi->napi);
>  	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
> -	netif_close(dev);
> +	bnxt_reset_task(bp, true);
>  	return rc;
>  }
> 
> @@ -16812,7 +16794,6 @@ static int bnxt_resume(struct device *device)
>  	struct bnxt *bp = netdev_priv(dev);
>  	int rc = 0;
> 
> -	rtnl_lock();
>  	netdev_lock(dev);
>  	rc = pci_enable_device(bp->pdev);
>  	if (rc) {
> @@ -16857,7 +16838,6 @@ static int bnxt_resume(struct device *device)
> 
>  resume_exit:
>  	netdev_unlock(bp->dev);
> -	rtnl_unlock();
>  	bnxt_ulp_start(bp, rc);
>  	if (!rc)
>  		bnxt_reenable_sriov(bp);
> @@ -17023,7 +17003,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
>  	int err;
> 
>  	netdev_info(bp->dev, "PCI Slot Resume\n");
> -	rtnl_lock();
>  	netdev_lock(netdev);
> 
>  	err = bnxt_hwrm_func_qcaps(bp);
> @@ -17041,7 +17020,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
>  		netif_device_attach(netdev);
> 
>  	netdev_unlock(netdev);
> -	rtnl_unlock();
>  	bnxt_ulp_start(bp, err);
>  	if (!err)
>  		bnxt_reenable_sriov(bp);
> --
> 2.49.0