Using guard(spinlock_irqsave) for "wq_head->lock" helps eliminate the
local "flags" variable, the local "ret" variable in some cases, and
helps simplify the flow in prepare_to_wait_event().
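Because the guard releases the lock from an __attribute__((cleanup()))
destructor when it goes out of scope (see include/linux/cleanup.h), the
unlock and IRQ restore run on every exit path, which is what allows
prepare_to_wait_event() to return -ERESTARTSYS directly from the
signal-pending branch instead of funneling through a local "ret". A
minimal sketch of the mechanism (the helper and function names below
are illustrative only, not the real macro output):

	/* Illustrative only; the real machinery is in include/linux/cleanup.h. */
	static void demo_unlock(spinlock_t **lock)
	{
		spin_unlock_irq(*lock);
	}

	static long demo_wait_check(struct wait_queue_head *wq_head)
	{
		spinlock_t *lock __attribute__((cleanup(demo_unlock))) = &wq_head->lock;

		spin_lock_irq(lock);
		if (signal_pending(current))
			return -ERESTARTSYS;	/* demo_unlock() still runs here */
		return 0;			/* ...and here */
	}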
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
kernel/sched/wait.c | 69 ++++++++++++++++-----------------------------
1 file changed, 25 insertions(+), 44 deletions(-)

diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 51e38f5f4701..2cf7687e00e0 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -16,44 +16,35 @@ EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- unsigned long flags;
-
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&wq_head->lock, flags);
+
+ guard(spinlock_irqsave)(&wq_head->lock);
__add_wait_queue(wq_head, wq_entry);
- spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- unsigned long flags;
-
wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&wq_head->lock, flags);
+
+ guard(spinlock_irqsave)(&wq_head->lock);
__add_wait_queue_entry_tail(wq_head, wq_entry);
- spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- unsigned long flags;
-
wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
- spin_lock_irqsave(&wq_head->lock, flags);
+
+ guard(spinlock_irqsave)(&wq_head->lock);
__add_wait_queue(wq_head, wq_entry);
- spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- unsigned long flags;
-
- spin_lock_irqsave(&wq_head->lock, flags);
+ guard(spinlock_irqsave)(&wq_head->lock);
__remove_wait_queue(wq_head, wq_entry);
- spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

@@ -99,13 +90,11 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
int nr_exclusive, int wake_flags, void *key)
{
- unsigned long flags;
int remaining;

- spin_lock_irqsave(&wq_head->lock, flags);
+ guard(spinlock_irqsave)(&wq_head->lock);
remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags,
key);
- spin_unlock_irqrestore(&wq_head->lock, flags);

return nr_exclusive - remaining;
}
@@ -228,14 +217,12 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head)
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
- unsigned long flags;
-
wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&wq_head->lock, flags);
+
+ guard(spinlock_irqsave)(&wq_head->lock);
if (list_empty(&wq_entry->entry))
__add_wait_queue(wq_head, wq_entry);
set_current_state(state);
- spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

@@ -243,17 +230,17 @@ EXPORT_SYMBOL(prepare_to_wait);
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
- unsigned long flags;
bool was_empty = false;

wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
- spin_lock_irqsave(&wq_head->lock, flags);
+
+ guard(spinlock_irqsave)(&wq_head->lock);
if (list_empty(&wq_entry->entry)) {
was_empty = list_empty(&wq_head->head);
__add_wait_queue_entry_tail(wq_head, wq_entry);
}
set_current_state(state);
- spin_unlock_irqrestore(&wq_head->lock, flags);
+
return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
@@ -269,10 +256,8 @@ EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
- unsigned long flags;
- long ret = 0;
+ guard(spinlock_irqsave)(&wq_head->lock);

- spin_lock_irqsave(&wq_head->lock, flags);
if (signal_pending_state(state, current)) {
/*
* Exclusive waiter must not fail if it was selected by wakeup,
@@ -287,19 +272,18 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
* we fail.
*/
list_del_init(&wq_entry->entry);
- ret = -ERESTARTSYS;
- } else {
- if (list_empty(&wq_entry->entry)) {
- if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
- __add_wait_queue_entry_tail(wq_head, wq_entry);
- else
- __add_wait_queue(wq_head, wq_entry);
- }
- set_current_state(state);
+ return -ERESTARTSYS;
}
- spin_unlock_irqrestore(&wq_head->lock, flags);

- return ret;
+ if (list_empty(&wq_entry->entry)) {
+ if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
+ __add_wait_queue_entry_tail(wq_head, wq_entry);
+ else
+ __add_wait_queue(wq_head, wq_entry);
+ }
+ set_current_state(state);
+
+ return 0;
}
EXPORT_SYMBOL(prepare_to_wait_event);

@@ -355,8 +339,6 @@ EXPORT_SYMBOL(do_wait_intr_irq);
*/
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
- unsigned long flags;
-
__set_current_state(TASK_RUNNING);
/*
* We can check for list emptiness outside the lock
@@ -372,9 +354,8 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
* the list).
*/
if (!list_empty_careful(&wq_entry->entry)) {
- spin_lock_irqsave(&wq_head->lock, flags);
+ guard(spinlock_irqsave)(&wq_head->lock);
list_del_init(&wq_entry->entry);
- spin_unlock_irqrestore(&wq_head->lock, flags);
}
}
EXPORT_SYMBOL(finish_wait);

base-commit: 328802738e1cd091d04076317f3c2174125c5916
--
2.34.1
On 4/3/25 00:08, K Prateek Nayak wrote:
> Using guard(spinlock_irqsave) for "wq_head->lock" helps eliminate the
> local "flags" variable, the local "ret" variable in some cases, and
> helps simplify the flow in prepare_to_wait_event().
>
The "flags" variable becomes implicit now (it moves into the guard object) rather than being eliminated; only "ret" is eliminated, and only in some cases.
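For reference, a rough sketch of why that is: DEFINE_LOCK_GUARD_1(spinlock_irqsave, ...) in include/linux/spinlock.h generates a guard type that carries the saved IRQ state along with the lock pointer, conceptually something like this (illustrative names, not the actual macro output):

	/* Illustrative approximation of the generated guard type. */
	struct guard_spinlock_irqsave {
		spinlock_t *lock;
		unsigned long flags;	/* the former local "flags" lives here */
	};

guard(spinlock_irqsave)(&wq_head->lock) constructs one of these on the stack, taking the lock with spin_lock_irqsave(g->lock, g->flags), and its __attribute__((cleanup())) destructor runs spin_unlock_irqrestore(g->lock, g->flags) when the variable goes out of scope. The storage is still there, just hidden inside the guard.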
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>