From the perspective of PREEMPT_RT, igb_msg_task() invokes a mix of
IRQ-safe and non-IRQ-safe operations.

To address this, we separate igb_msg_task() into distinct IRQ-safe and
preemptible-safe components. This is a preparatory step for upcoming
commits, where the igb_msix_other interrupt handler will be split into
an IRQ handler and a threaded handler, each invoking the appropriate
part of the newly divided igb_msg_task().
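
For illustration only, a minimal sketch of how the two halves are
expected to be consumed once igb_msix_other is split. The handler names
and wiring below are placeholders, not part of this series; the real
implementation lands in the follow-up commit:

	/* Primary (hard IRQ) handler: only the IRQ-safe half may run here. */
	static irqreturn_t igb_msix_other_primary(int irq, void *data)
	{
		struct igb_adapter *adapter = data;

		igb_msg_task_irq_safe(adapter);

		/* Defer the work that may sleep under PREEMPT_RT. */
		return IRQ_WAKE_THREAD;
	}

	/*
	 * Threaded handler: runs in preemptible context, where the
	 * GFP_ATOMIC allocation in igb_rcv_msg_from_vf() and the
	 * sleeping vfs_lock are permitted on PREEMPT_RT.
	 */
	static irqreturn_t igb_msix_other_thread(int irq, void *data)
	{
		struct igb_adapter *adapter = data;

		igb_msg_task_preemptible_safe(adapter);

		return IRQ_HANDLED;
	}

Such a pair would typically be registered with request_threaded_irq();
the exact registration used by the follow-up commit may differ.
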
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
---
drivers/net/ethernet/intel/igb/igb_main.c | 88 +++++++++++++++++++++--
1 file changed, 81 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9b4235ec226df..5828831fd29c2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -149,6 +149,8 @@ static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
+static void igb_msg_task_irq_safe(struct igb_adapter *adapter);
+static void igb_msg_task_preemptible_safe(struct igb_adapter *adapter);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
@@ -3681,6 +3683,30 @@ static __always_inline void vfs_unlock_irqrestore(struct igb_adapter *adapter,
raw_spin_unlock_irqrestore(&adapter->raw_vfs_lock, flags);
spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
+
+static __always_inline void vfs_spin_lock_irqsave(struct igb_adapter *adapter,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&adapter->vfs_lock, *flags);
+}
+
+static __always_inline void vfs_spin_unlock_irqrestore(struct igb_adapter *adapter,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
+}
+
+static __always_inline void vfs_raw_spin_lock_irqsave(struct igb_adapter *adapter,
+ unsigned long *flags)
+{
+ raw_spin_lock_irqsave(&adapter->raw_vfs_lock, *flags);
+}
+
+static __always_inline void vfs_raw_spin_unlock_irqrestore(struct igb_adapter *adapter,
+ unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&adapter->raw_vfs_lock, flags);
+}
#else
static __always_inline void vfs_lock_init(struct igb_adapter *adapter)
{
@@ -3696,6 +3722,30 @@ static __always_inline void vfs_unlock_irqrestore(struct igb_adapter *adapter, u
{
spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
+
+static __always_inline void vfs_spin_lock_irqsave(struct igb_adapter *adapter,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&adapter->vfs_lock, *flags);
+}
+
+static __always_inline void vfs_spin_unlock_irqrestore(struct igb_adapter *adapter,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
+}
+
+static __always_inline void vfs_raw_spin_lock_irqsave(struct igb_adapter *adapter,
+ unsigned long *flags)
+{
+ spin_lock_irqsave(&adapter->vfs_lock, *flags);
+}
+
+static __always_inline void vfs_raw_spin_unlock_irqrestore(struct igb_adapter *adapter,
+ unsigned long flags)
+{
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
+}
 #endif

 #ifdef CONFIG_PCI_IOV
@@ -8070,27 +8120,51 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
igb_unlock_mbx(hw, vf);
 }

-static void igb_msg_task(struct igb_adapter *adapter)
+/*
+ * Note: splitting igb_msg_task() into IRQ-safe and preemptible-safe
+ * parts only makes sense under PREEMPT_RT.
+ * igb_rcv_msg_from_vf() is not IRQ safe because it calls kcalloc()
+ * with GFP_ATOMIC, and GFP_ATOMIC allocations are not IRQ safe
+ * under PREEMPT_RT.
+ */
+static void igb_msg_task_irq_safe(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
unsigned long flags;
 u32 vf;

- vfs_lock_irqsave(adapter, &flags);
+ vfs_raw_spin_lock_irqsave(adapter, &flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
 igb_vf_reset_event(adapter, vf);

- /* process any messages pending */
- if (!igb_check_for_msg(hw, vf))
- igb_rcv_msg_from_vf(adapter, vf);
-
/* process any acks */
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
- vfs_unlock_irqrestore(adapter, flags);
+ vfs_raw_spin_unlock_irqrestore(adapter, flags);
+}
+
+static void igb_msg_task_preemptible_safe(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ u32 vf;
+
+ vfs_spin_lock_irqsave(adapter, &flags);
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ /* process any messages pending */
+ if (!igb_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(adapter, vf);
+ }
+ vfs_spin_unlock_irqrestore(adapter, flags);
+}
+
+static __always_inline void igb_msg_task(struct igb_adapter *adapter)
+{
+ igb_msg_task_irq_safe(adapter);
+ igb_msg_task_preemptible_safe(adapter);
 }

 /**
--
2.47.0
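
As background for the comment added in the hunk above, a generic sketch
(not igb code; the names here are made up) of what PREEMPT_RT permits in
hard-IRQ context, and why the message-processing half must run from
preemptible context:

	/* A raw_spinlock_t keeps spinning on PREEMPT_RT and stays usable
	 * in hard-IRQ context; a spinlock_t becomes a sleeping lock there.
	 */
	static DEFINE_RAW_SPINLOCK(example_raw_lock);
	static DEFINE_SPINLOCK(example_lock);

	static irqreturn_t example_hardirq(int irq, void *data)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_raw_lock, flags);	/* fine here */
		/* ... update state that cannot wait for the thread ... */
		raw_spin_unlock_irqrestore(&example_raw_lock, flags);

		/*
		 * Not allowed here on PREEMPT_RT:
		 *   spin_lock(&example_lock);       may sleep
		 *   kcalloc(n, size, GFP_ATOMIC);   the allocator takes
		 *                                   sleeping locks
		 * Such work has to move to a threaded handler.
		 */
		return IRQ_WAKE_THREAD;
	}
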
Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>