The only generic interface to execute asynchronously in the BH context is
tasklet; however, it's marked deprecated and has some design flaws. To
replace tasklets, BH workqueue support was recently added. A BH workqueue
behaves similarly to regular workqueues except that the queued work items
are executed in the BH context.
This patch converts drivers/infiniband/* from tasklet to BH workqueue.
Based on the work done by Tejun Heo <tj@kernel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10
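For reference, the conversion pattern is mechanical. An illustrative
sketch against a hypothetical driver "foo" (not code from this patch):

/* Before: the deprecated tasklet API. */
static void foo_handler(struct tasklet_struct *t)
{
        struct foo *f = from_tasklet(f, t, recv_tasklet);
        /* process in BH (softirq) context */
}

        tasklet_setup(&f->recv_tasklet, foo_handler);  /* init */
        tasklet_schedule(&f->recv_tasklet);            /* kick */
        tasklet_kill(&f->recv_tasklet);                /* teardown */

/* After: a BH workqueue; the callback still runs in BH context. */
static void foo_handler(struct work_struct *w)
{
        struct foo *f = from_work(f, w, recv_work);
        /* process in BH (softirq) context */
}

        INIT_WORK(&f->recv_work, foo_handler);         /* init */
        queue_work(system_bh_wq, &f->recv_work);       /* kick */
        cancel_work_sync(&f->recv_work);               /* teardown */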
Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
drivers/char/ipmi/ipmi_msghandler.c | 30 ++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b0eedc4595b3..fce2a2dbdc82 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -36,12 +36,13 @@
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
+#include <linux/workqueue.h>
#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_recv_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
@@ -498,13 +499,13 @@ struct ipmi_smi {
/*
* Messages queued for delivery. If delivery fails (out of memory
* for instance), They will stay in here to be processed later in a
- * periodic timer interrupt. The tasklet is for handling received
+ * periodic timer interrupt. The work is for handling received
* messages directly from the handler.
*/
spinlock_t waiting_rcv_msgs_lock;
struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver;
- struct tasklet_struct recv_tasklet;
+ struct work_struct recv_work;
spinlock_t xmit_msgs_lock;
struct list_head xmit_msgs;
@@ -704,7 +705,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
- tasklet_kill(&intf->recv_tasklet);
+ cancel_work_sync(&intf->recv_work);
free_smi_msg_list(&intf->waiting_rcv_msgs);
free_recv_msg_list(&intf->waiting_events);
@@ -1319,7 +1320,7 @@ static void free_user(struct kref *ref)
{
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
- /* SRCU cleanup must happen in task context. */
+ /* SRCU cleanup must happen in work context. */
queue_work(remove_work_wq, &user->remove_work);
}
@@ -3605,8 +3606,7 @@ int ipmi_add_smi(struct module *owner,
intf->curr_seq = 0;
spin_lock_init(&intf->waiting_rcv_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
- tasklet_setup(&intf->recv_tasklet,
- smi_recv_tasklet);
+ INIT_WORK(&intf->recv_work, smi_recv_work);
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->xmit_msgs_lock);
INIT_LIST_HEAD(&intf->xmit_msgs);
@@ -4779,7 +4779,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
* To preserve message order, quit if we
* can't handle a message. Add the message
* back at the head, this is safe because this
- * tasklet is the only thing that pulls the
+ * work is the only thing that pulls the
* messages.
*/
list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4812,10 +4812,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
}
}
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_recv_work(struct work_struct *t)
{
unsigned long flags = 0; /* keep us warning-free. */
- struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+ struct ipmi_smi *intf = from_work(intf, t, recv_work);
int run_to_completion = intf->run_to_completion;
struct ipmi_smi_msg *newmsg = NULL;
@@ -4866,7 +4866,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
/*
* To preserve message order, we keep a queue and deliver from
- * a tasklet.
+ * a work.
*/
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4887,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
if (run_to_completion)
- smi_recv_tasklet(&intf->recv_tasklet);
+ smi_recv_work(&intf->recv_work);
else
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_bh_wq, &intf->recv_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4899,7 +4899,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_bh_wq, &intf->recv_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5068,7 +5068,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- tasklet_schedule(&intf->recv_tasklet);
+ queue_work(system_bh_wq, &intf->recv_work);
return need_timer;
}
--
2.17.1
On Wed, Mar 27, 2024 at 04:03:11PM +0000, Allen Pais wrote:
> The only generic interface to execute asynchronously in the BH context is
> tasklet; however, it's marked deprecated and has some design flaws. To
> replace tasklets, BH workqueue support was recently added. A BH workqueue
> behaves similarly to regular workqueues except that the queued work items
> are executed in the BH context.
>
> This patch converts drivers/infiniband/* from tasklet to BH workqueue.
I think you mean drivers/char/ipmi/* here.
I believe that work queue items execute single-threaded for a work
queue, so this should be good.  I need to test this, though.  It may be
that an IPMI device can have its own work queue; it may not be important
to run it in bh context.
-corey
>
> Based on the work done by Tejun Heo <tj@kernel.org>
> Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10
>
> Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> ---
> drivers/char/ipmi/ipmi_msghandler.c | 30 ++++++++++++++---------------
> 1 file changed, 15 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
> index b0eedc4595b3..fce2a2dbdc82 100644
> --- a/drivers/char/ipmi/ipmi_msghandler.c
> +++ b/drivers/char/ipmi/ipmi_msghandler.c
> @@ -36,12 +36,13 @@
> #include <linux/nospec.h>
> #include <linux/vmalloc.h>
> #include <linux/delay.h>
> +#include <linux/workqueue.h>
>
> #define IPMI_DRIVER_VERSION "39.2"
>
> static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
> static int ipmi_init_msghandler(void);
> -static void smi_recv_tasklet(struct tasklet_struct *t);
> +static void smi_recv_work(struct work_struct *t);
> static void handle_new_recv_msgs(struct ipmi_smi *intf);
> static void need_waiter(struct ipmi_smi *intf);
> static int handle_one_recv_msg(struct ipmi_smi *intf,
> @@ -498,13 +499,13 @@ struct ipmi_smi {
> /*
> * Messages queued for delivery. If delivery fails (out of memory
> * for instance), They will stay in here to be processed later in a
> - * periodic timer interrupt. The tasklet is for handling received
> + * periodic timer interrupt. The work is for handling received
> * messages directly from the handler.
> */
> spinlock_t waiting_rcv_msgs_lock;
> struct list_head waiting_rcv_msgs;
> atomic_t watchdog_pretimeouts_to_deliver;
> - struct tasklet_struct recv_tasklet;
> + struct work_struct recv_work;
>
> spinlock_t xmit_msgs_lock;
> struct list_head xmit_msgs;
> @@ -704,7 +705,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
> struct cmd_rcvr *rcvr, *rcvr2;
> struct list_head list;
>
> - tasklet_kill(&intf->recv_tasklet);
> + cancel_work_sync(&intf->recv_work);
>
> free_smi_msg_list(&intf->waiting_rcv_msgs);
> free_recv_msg_list(&intf->waiting_events);
> @@ -1319,7 +1320,7 @@ static void free_user(struct kref *ref)
> {
> struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
>
> - /* SRCU cleanup must happen in task context. */
> + /* SRCU cleanup must happen in work context. */
> queue_work(remove_work_wq, &user->remove_work);
> }
>
> @@ -3605,8 +3606,7 @@ int ipmi_add_smi(struct module *owner,
> intf->curr_seq = 0;
> spin_lock_init(&intf->waiting_rcv_msgs_lock);
> INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
> - tasklet_setup(&intf->recv_tasklet,
> - smi_recv_tasklet);
> + INIT_WORK(&intf->recv_work, smi_recv_work);
> atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
> spin_lock_init(&intf->xmit_msgs_lock);
> INIT_LIST_HEAD(&intf->xmit_msgs);
> @@ -4779,7 +4779,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
> * To preserve message order, quit if we
> * can't handle a message. Add the message
> * back at the head, this is safe because this
> - * tasklet is the only thing that pulls the
> + * work is the only thing that pulls the
> * messages.
> */
> list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
> @@ -4812,10 +4812,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
> }
> }
>
> -static void smi_recv_tasklet(struct tasklet_struct *t)
> +static void smi_recv_work(struct work_struct *t)
> {
> unsigned long flags = 0; /* keep us warning-free. */
> - struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
> + struct ipmi_smi *intf = from_work(intf, t, recv_work);
> int run_to_completion = intf->run_to_completion;
> struct ipmi_smi_msg *newmsg = NULL;
>
> @@ -4866,7 +4866,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
>
> /*
> * To preserve message order, we keep a queue and deliver from
> - * a tasklet.
> + * a work.
> */
> if (!run_to_completion)
> spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
> @@ -4887,9 +4887,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
> spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
>
> if (run_to_completion)
> - smi_recv_tasklet(&intf->recv_tasklet);
> + smi_recv_work(&intf->recv_work);
> else
> - tasklet_schedule(&intf->recv_tasklet);
> + queue_work(system_bh_wq, &intf->recv_work);
> }
> EXPORT_SYMBOL(ipmi_smi_msg_received);
>
> @@ -4899,7 +4899,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
> return;
>
> atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
> - tasklet_schedule(&intf->recv_tasklet);
> + queue_work(system_bh_wq, &intf->recv_work);
> }
> EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
>
> @@ -5068,7 +5068,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
> flags);
> }
>
> - tasklet_schedule(&intf->recv_tasklet);
> + queue_work(system_bh_wq, &intf->recv_work);
>
> return need_timer;
> }
> --
> 2.17.1
>
>
On Wed, Mar 27, 2024 at 11:05 AM Corey Minyard <minyard@acm.org> wrote:
>
> On Wed, Mar 27, 2024 at 04:03:11PM +0000, Allen Pais wrote:
> > The only generic interface to execute asynchronously in the BH context is
> > tasklet; however, it's marked deprecated and has some design flaws. To
> > replace tasklets, BH workqueue support was recently added. A BH workqueue
> > behaves similarly to regular workqueues except that the queued work items
> > are executed in the BH context.
> >
> > This patch converts drivers/infiniband/* from tasklet to BH workqueue.
>
> I think you mean drivers/char/ipmi/* here.
My apologies, my scripts messed up the commit messages for this series.
Will have it fixed in v2.
>
> I believe that work queue items execute single-threaded for a work
> queue, so this should be good.  I need to test this, though.  It may be
> that an IPMI device can have its own work queue; it may not be important
> to run it in bh context.
Fair point. Could you please let me know once you have had a chance to test
these changes? Meanwhile, I will work on an RFC wherein IPMI will have its own
workqueue.
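Roughly, I was thinking of something along these lines (an untested
sketch; the ipmi_bh_wq name and the init/exit hooks are placeholders,
and WQ_BH comes from the for-6.10 branch above):

/* Untested sketch: a dedicated BH workqueue for IPMI instead of
 * system_bh_wq.
 */
static struct workqueue_struct *ipmi_bh_wq;

static int ipmi_wq_init(void)
{
        ipmi_bh_wq = alloc_workqueue("ipmi_bh", WQ_BH, 0);
        if (!ipmi_bh_wq)
                return -ENOMEM;
        return 0;
}

static void ipmi_wq_exit(void)
{
        destroy_workqueue(ipmi_bh_wq);
}

/* The queueing sites would then become, e.g.:
 *      queue_work(ipmi_bh_wq, &intf->recv_work);
 */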
Thanks for taking the time to review.
- Allen
>
> -corey
>
> >
> > Based on the work done by Tejun Heo <tj@kernel.org>
> > Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10
> >
> > Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> > ---
> > drivers/char/ipmi/ipmi_msghandler.c | 30 ++++++++++++++---------------
> > 1 file changed, 15 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
> > index b0eedc4595b3..fce2a2dbdc82 100644
> > --- a/drivers/char/ipmi/ipmi_msghandler.c
> > +++ b/drivers/char/ipmi/ipmi_msghandler.c
> > @@ -36,12 +36,13 @@
> > #include <linux/nospec.h>
> > #include <linux/vmalloc.h>
> > #include <linux/delay.h>
> > +#include <linux/workqueue.h>
> >
> > #define IPMI_DRIVER_VERSION "39.2"
> >
> > static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
> > static int ipmi_init_msghandler(void);
> > -static void smi_recv_tasklet(struct tasklet_struct *t);
> > +static void smi_recv_work(struct work_struct *t);
> > static void handle_new_recv_msgs(struct ipmi_smi *intf);
> > static void need_waiter(struct ipmi_smi *intf);
> > static int handle_one_recv_msg(struct ipmi_smi *intf,
> > @@ -498,13 +499,13 @@ struct ipmi_smi {
> > /*
> > * Messages queued for delivery. If delivery fails (out of memory
> > * for instance), They will stay in here to be processed later in a
> > - * periodic timer interrupt. The tasklet is for handling received
> > + * periodic timer interrupt. The work is for handling received
> > * messages directly from the handler.
> > */
> > spinlock_t waiting_rcv_msgs_lock;
> > struct list_head waiting_rcv_msgs;
> > atomic_t watchdog_pretimeouts_to_deliver;
> > - struct tasklet_struct recv_tasklet;
> > + struct work_struct recv_work;
> >
> > spinlock_t xmit_msgs_lock;
> > struct list_head xmit_msgs;
> > @@ -704,7 +705,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
> > struct cmd_rcvr *rcvr, *rcvr2;
> > struct list_head list;
> >
> > - tasklet_kill(&intf->recv_tasklet);
> > + cancel_work_sync(&intf->recv_work);
> >
> > free_smi_msg_list(&intf->waiting_rcv_msgs);
> > free_recv_msg_list(&intf->waiting_events);
> > @@ -1319,7 +1320,7 @@ static void free_user(struct kref *ref)
> > {
> > struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
> >
> > - /* SRCU cleanup must happen in task context. */
> > + /* SRCU cleanup must happen in work context. */
> > queue_work(remove_work_wq, &user->remove_work);
> > }
> >
> > @@ -3605,8 +3606,7 @@ int ipmi_add_smi(struct module *owner,
> > intf->curr_seq = 0;
> > spin_lock_init(&intf->waiting_rcv_msgs_lock);
> > INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
> > - tasklet_setup(&intf->recv_tasklet,
> > - smi_recv_tasklet);
> > + INIT_WORK(&intf->recv_work, smi_recv_work);
> > atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
> > spin_lock_init(&intf->xmit_msgs_lock);
> > INIT_LIST_HEAD(&intf->xmit_msgs);
> > @@ -4779,7 +4779,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
> > * To preserve message order, quit if we
> > * can't handle a message. Add the message
> > * back at the head, this is safe because this
> > - * tasklet is the only thing that pulls the
> > + * work is the only thing that pulls the
> > * messages.
> > */
> > list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
> > @@ -4812,10 +4812,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
> > }
> > }
> >
> > -static void smi_recv_tasklet(struct tasklet_struct *t)
> > +static void smi_recv_work(struct work_struct *t)
> > {
> > unsigned long flags = 0; /* keep us warning-free. */
> > - struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
> > + struct ipmi_smi *intf = from_work(intf, t, recv_work);
> > int run_to_completion = intf->run_to_completion;
> > struct ipmi_smi_msg *newmsg = NULL;
> >
> > @@ -4866,7 +4866,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
> >
> > /*
> > * To preserve message order, we keep a queue and deliver from
> > - * a tasklet.
> > + * a work.
> > */
> > if (!run_to_completion)
> > spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
> > @@ -4887,9 +4887,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
> > spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
> >
> > if (run_to_completion)
> > - smi_recv_tasklet(&intf->recv_tasklet);
> > + smi_recv_work(&intf->recv_work);
> > else
> > - tasklet_schedule(&intf->recv_tasklet);
> > + queue_work(system_bh_wq, &intf->recv_work);
> > }
> > EXPORT_SYMBOL(ipmi_smi_msg_received);
> >
> > @@ -4899,7 +4899,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
> > return;
> >
> > atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
> > - tasklet_schedule(&intf->recv_tasklet);
> > + queue_work(system_bh_wq, &intf->recv_work);
> > }
> > EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
> >
> > @@ -5068,7 +5068,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
> > flags);
> > }
> >
> > - tasklet_schedule(&intf->recv_tasklet);
> > + queue_work(system_bh_wq, &intf->recv_work);
> >
> > return need_timer;
> > }
> > --
> > 2.17.1
> >
> >
>
--
- Allen
On Thu, Mar 28, 2024 at 10:52:16AM -0700, Allen wrote:
> On Wed, Mar 27, 2024 at 11:05 AM Corey Minyard <minyard@acm.org> wrote:
> >
> > I believe that work queue items execute single-threaded for a work
> > queue, so this should be good.  I need to test this, though.  It may be
> > that an IPMI device can have its own work queue; it may not be important
> > to run it in bh context.
>
> Fair point. Could you please let me know once you have had a chance to test
> these changes? Meanwhile, I will work on an RFC wherein IPMI will have its own
> workqueue.
>
> Thanks for taking the time to review.
After looking and thinking about it a bit, a BH context is still
probably the best for this.
I have tested this patch under load and various scenarios and it seems
to work ok. So:
Tested-by: Corey Minyard <cminyard@mvista.com>
Acked-by: Corey Minyard <cminyard@mvista.com>
Or I can take this into my tree.
-corey
>
> - Allen
>
> >
> > -corey
> >
> > >
> > > Based on the work done by Tejun Heo <tj@kernel.org>
> > > Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10
> > >
> > > Signed-off-by: Allen Pais <allen.lkml@gmail.com>
> > > ---
> > > drivers/char/ipmi/ipmi_msghandler.c | 30 ++++++++++++++---------------
> > > 1 file changed, 15 insertions(+), 15 deletions(-)
> > >
> > > diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
> > > index b0eedc4595b3..fce2a2dbdc82 100644
> > > --- a/drivers/char/ipmi/ipmi_msghandler.c
> > > +++ b/drivers/char/ipmi/ipmi_msghandler.c
> > > @@ -36,12 +36,13 @@
> > > #include <linux/nospec.h>
> > > #include <linux/vmalloc.h>
> > > #include <linux/delay.h>
> > > +#include <linux/workqueue.h>
> > >
> > > #define IPMI_DRIVER_VERSION "39.2"
> > >
> > > static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
> > > static int ipmi_init_msghandler(void);
> > > -static void smi_recv_tasklet(struct tasklet_struct *t);
> > > +static void smi_recv_work(struct work_struct *t);
> > > static void handle_new_recv_msgs(struct ipmi_smi *intf);
> > > static void need_waiter(struct ipmi_smi *intf);
> > > static int handle_one_recv_msg(struct ipmi_smi *intf,
> > > @@ -498,13 +499,13 @@ struct ipmi_smi {
> > > /*
> > > * Messages queued for delivery. If delivery fails (out of memory
> > > * for instance), They will stay in here to be processed later in a
> > > - * periodic timer interrupt. The tasklet is for handling received
> > > + * periodic timer interrupt. The work is for handling received
> > > * messages directly from the handler.
> > > */
> > > spinlock_t waiting_rcv_msgs_lock;
> > > struct list_head waiting_rcv_msgs;
> > > atomic_t watchdog_pretimeouts_to_deliver;
> > > - struct tasklet_struct recv_tasklet;
> > > + struct work_struct recv_work;
> > >
> > > spinlock_t xmit_msgs_lock;
> > > struct list_head xmit_msgs;
> > > @@ -704,7 +705,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
> > > struct cmd_rcvr *rcvr, *rcvr2;
> > > struct list_head list;
> > >
> > > - tasklet_kill(&intf->recv_tasklet);
> > > + cancel_work_sync(&intf->recv_work);
> > >
> > > free_smi_msg_list(&intf->waiting_rcv_msgs);
> > > free_recv_msg_list(&intf->waiting_events);
> > > @@ -1319,7 +1320,7 @@ static void free_user(struct kref *ref)
> > > {
> > > struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
> > >
> > > - /* SRCU cleanup must happen in task context. */
> > > + /* SRCU cleanup must happen in work context. */
> > > queue_work(remove_work_wq, &user->remove_work);
> > > }
> > >
> > > @@ -3605,8 +3606,7 @@ int ipmi_add_smi(struct module *owner,
> > > intf->curr_seq = 0;
> > > spin_lock_init(&intf->waiting_rcv_msgs_lock);
> > > INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
> > > - tasklet_setup(&intf->recv_tasklet,
> > > - smi_recv_tasklet);
> > > + INIT_WORK(&intf->recv_work, smi_recv_work);
> > > atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
> > > spin_lock_init(&intf->xmit_msgs_lock);
> > > INIT_LIST_HEAD(&intf->xmit_msgs);
> > > @@ -4779,7 +4779,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
> > > * To preserve message order, quit if we
> > > * can't handle a message. Add the message
> > > * back at the head, this is safe because this
> > > - * tasklet is the only thing that pulls the
> > > + * work is the only thing that pulls the
> > > * messages.
> > > */
> > > list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
> > > @@ -4812,10 +4812,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
> > > }
> > > }
> > >
> > > -static void smi_recv_tasklet(struct tasklet_struct *t)
> > > +static void smi_recv_work(struct work_struct *t)
> > > {
> > > unsigned long flags = 0; /* keep us warning-free. */
> > > - struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
> > > + struct ipmi_smi *intf = from_work(intf, t, recv_work);
> > > int run_to_completion = intf->run_to_completion;
> > > struct ipmi_smi_msg *newmsg = NULL;
> > >
> > > @@ -4866,7 +4866,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
> > >
> > > /*
> > > * To preserve message order, we keep a queue and deliver from
> > > - * a tasklet.
> > > + * a work.
> > > */
> > > if (!run_to_completion)
> > > spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
> > > @@ -4887,9 +4887,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
> > > spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
> > >
> > > if (run_to_completion)
> > > - smi_recv_tasklet(&intf->recv_tasklet);
> > > + smi_recv_work(&intf->recv_work);
> > > else
> > > - tasklet_schedule(&intf->recv_tasklet);
> > > + queue_work(system_bh_wq, &intf->recv_work);
> > > }
> > > EXPORT_SYMBOL(ipmi_smi_msg_received);
> > >
> > > @@ -4899,7 +4899,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
> > > return;
> > >
> > > atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
> > > - tasklet_schedule(&intf->recv_tasklet);
> > > + queue_work(system_bh_wq, &intf->recv_work);
> > > }
> > > EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
> > >
> > > @@ -5068,7 +5068,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
> > > flags);
> > > }
> > >
> > > - tasklet_schedule(&intf->recv_tasklet);
> > > + queue_work(system_bh_wq, &intf->recv_work);
> > >
> > > return need_timer;
> > > }
> > > --
> > > 2.17.1
> > >
> > >
> >
>
>
> --
> - Allen
>
> > > I believe that work queue items execute single-threaded for a work
> > > queue, so this should be good.  I need to test this, though.  It may be
> > > that an IPMI device can have its own work queue; it may not be important
> > > to run it in bh context.
> >
> > Fair point. Could you please let me know once you have had a chance to test
> > these changes? Meanwhile, I will work on an RFC wherein IPMI will have its own
> > workqueue.
> >
> > Thanks for taking the time to review.
>
> After looking and thinking about it a bit, a BH context is still
> probably the best for this.
>
> I have tested this patch under load and various scenarios and it seems
> to work ok. So:
>
> Tested-by: Corey Minyard <cminyard@mvista.com>
> Acked-by: Corey Minyard <cminyard@mvista.com>
>
> Or I can take this into my tree.
>
> -corey

Thank you very much. I think it should be okay for you to carry it into
your tree.

- Allen
On Thu, Mar 28, 2024 at 12:41:22PM -0700, Allen wrote:
> > > > I believe that work queue items execute single-threaded for a work
> > > > queue, so this should be good.  I need to test this, though.  It may be
> > > > that an IPMI device can have its own work queue; it may not be important
> > > > to run it in bh context.
> > >
> > > Fair point. Could you please let me know once you have had a chance to test
> > > these changes? Meanwhile, I will work on an RFC wherein IPMI will have its own
> > > workqueue.
> > >
> > > Thanks for taking the time to review.
> >
> > After looking and thinking about it a bit, a BH context is still
> > probably the best for this.
> >
> > I have tested this patch under load and various scenarios and it seems
> > to work ok. So:
> >
> > Tested-by: Corey Minyard <cminyard@mvista.com>
> > Acked-by: Corey Minyard <cminyard@mvista.com>
> >
> > Or I can take this into my tree.
> >
> > -corey
>
> Thank you very much. I think it should be okay for you to carry it into
> your tree.

Ok, it's in my for-next tree.  I fixed the directory reference, and I
changed all the comments where you changed "tasklet" to "work" to
instead say "workqueue".

-corey

>
> - Allen
>
> > > >
> > > > Fair point. Could you please let me know once you have had a chance to test
> > > > these changes? Meanwhile, I will work on an RFC wherein IPMI will have its own
> > > > workqueue.
> > > >
> > > > Thanks for taking the time to review.
> > >
> > > After looking and thinking about it a bit, a BH context is still
> > > probably the best for this.
> > >
> > > I have tested this patch under load and various scenarios and it seems
> > > to work ok. So:
> > >
> > > Tested-by: Corey Minyard <cminyard@mvista.com>
> > > Acked-by: Corey Minyard <cminyard@mvista.com>
> > >
> > > Or I can take this into my tree.
> > >
> > > -corey
> >
> > Thank you very much. I think it should be okay for you to carry it into
> > your tree.
>
> Ok, it's in my for-next tree.  I fixed the directory reference, and I
> changed all the comments where you changed "tasklet" to "work" to
> instead say "workqueue".
>

Thank you very much for fixing it.

- Allen