kernel/sched/core.c | 2 +- kernel/sched/cputime.c | 2 +- kernel/sched/fair.c | 8 ++++---- kernel/sched/wait_bit.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-)
From: "jianyun.gao" <jianyungao89@gmail.com>
The following are some spelling mistakes existing in the scheduler
module. Just fix it!
slection -> selection
achitectures -> architectures
excempt -> exempt
incorectly -> incorrectly
litle -> little
faireness -> fairness
condtion -> condition
Signed-off-by: jianyun.gao <jianyungao89@gmail.com>
---
V2:
Delete the incorrect modifications for "borken" in V1.
The previous version is here:
https://lore.kernel.org/lkml/20250926092832.1457477-1-jianyungao89@gmail.com/
kernel/sched/core.c | 2 +-
kernel/sched/cputime.c | 2 +-
kernel/sched/fair.c | 8 ++++----
kernel/sched/wait_bit.c | 2 +-
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f1e5cb94c53..af5076e40567 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6858,7 +6858,7 @@ static void __sched notrace __schedule(int sched_mode)
/*
* We pass task_is_blocked() as the should_block arg
* in order to keep mutex-blocked tasks on the runqueue
- * for slection with proxy-exec (without proxy-exec
+ * for selection with proxy-exec (without proxy-exec
* task_is_blocked() will always be false).
*/
try_to_block_task(rq, prev, &prev_state,
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 7097de2c8cda..2429be5a5e40 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -585,7 +585,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
/*
* Because mul_u64_u64_div_u64() can approximate on some
- * achitectures; enforce the constraint that: a*b/(b+c) <= a.
+ * architectures; enforce the constraint that: a*b/(b+c) <= a.
*/
if (unlikely(stime > rtime))
stime = rtime;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 18a30ae35441..20fe5899b247 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5381,7 +5381,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bool delay = sleep;
/*
* DELAY_DEQUEUE relies on spurious wakeups, special task
- * states must not suffer spurious wakeups, excempt them.
+ * states must not suffer spurious wakeups, exempt them.
*/
if (flags & (DEQUEUE_SPECIAL | DEQUEUE_THROTTLE))
delay = false;
@@ -5842,7 +5842,7 @@ static bool enqueue_throttled_task(struct task_struct *p)
* target cfs_rq's limbo list.
*
* Do not do that when @p is current because the following race can
- * cause @p's group_node to be incorectly re-insterted in its rq's
+ * cause @p's group_node to be incorrectly re-inserted in its rq's
* cfs_tasks list, despite being throttled:
*
* cpuX cpuY
@@ -12161,7 +12161,7 @@ static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
* sched_balance_newidle() bumps the cost whenever newidle
* balance fails, and we don't want things to grow out of
* control. Use the sysctl_sched_migration_cost as the upper
- * limit, plus a litle extra to avoid off by ones.
+ * limit, plus a little extra to avoid off by ones.
*/
sd->max_newidle_lb_cost =
min(cost, sysctl_sched_migration_cost + 200);
@@ -13176,7 +13176,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
* If a task gets attached to this cfs_rq and before being queued,
* it gets migrated to another CPU due to reasons like affinity
* change, make sure this cfs_rq stays on leaf cfs_rq list to have
- * that removed load decayed or it can cause faireness problem.
+ * that removed load decayed or it can cause fairness problem.
*/
if (!cfs_rq_pelt_clock_throttled(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
index 1088d3b7012c..47ab3bcd2ebc 100644
--- a/kernel/sched/wait_bit.c
+++ b/kernel/sched/wait_bit.c
@@ -207,7 +207,7 @@ EXPORT_SYMBOL(init_wait_var_entry);
* given variable to change. wait_var_event() can be waiting for an
* arbitrary condition to be true and associates that condition with an
* address. Calling wake_up_var() suggests that the condition has been
- * made true, but does not strictly require the condtion to use the
+ * made true, but does not strictly require the condition to use the
* address given.
*
* The wake-up is sent to tasks in a waitqueue selected by hash from a
--
2.34.1
On 9/29/25 07:12, Jianyun Gao wrote: > From: "jianyun.gao" <jianyungao89@gmail.com> > > The following are some spelling mistakes existing in the scheduler > module. Just fix it! > > slection -> selection > achitectures -> architectures > excempt -> except > incorectly -> incorrectly > litle -> little > faireness -> fairness > condtion -> condition > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > --- > V2: > Delete the incorrect modifications for "borken" in V1. > The previous version is here: > > https://lore.kernel.org/lkml/20250926092832.1457477-1-jianyungao89@gmail.com/ > > kernel/sched/core.c | 2 +- > kernel/sched/cputime.c | 2 +- > kernel/sched/fair.c | 8 ++++---- > kernel/sched/wait_bit.c | 2 +- > 4 files changed, 7 insertions(+), 7 deletions(-) > > diff --git a/kernel/sched/core.c b/kernel/sched/core.c > index 7f1e5cb94c53..af5076e40567 100644 > --- a/kernel/sched/core.c > +++ b/kernel/sched/core.c > @@ -6858,7 +6858,7 @@ static void __sched notrace __schedule(int sched_mode) > /* > * We pass task_is_blocked() as the should_block arg > * in order to keep mutex-blocked tasks on the runqueue > - * for slection with proxy-exec (without proxy-exec > + * for selection with proxy-exec (without proxy-exec > * task_is_blocked() will always be false). > */ > try_to_block_task(rq, prev, &prev_state, > diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c > index 7097de2c8cda..2429be5a5e40 100644 > --- a/kernel/sched/cputime.c > +++ b/kernel/sched/cputime.c > @@ -585,7 +585,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, > stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); > /* > * Because mul_u64_u64_div_u64() can approximate on some > - * achitectures; enforce the constraint that: a*b/(b+c) <= a. > + * architectures; enforce the constraint that: a*b/(b+c) <= a. 
> */ > if (unlikely(stime > rtime)) > stime = rtime; > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c > index 18a30ae35441..20fe5899b247 100644 > --- a/kernel/sched/fair.c > +++ b/kernel/sched/fair.c > @@ -5381,7 +5381,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) > bool delay = sleep; > /* > * DELAY_DEQUEUE relies on spurious wakeups, special task > - * states must not suffer spurious wakeups, excempt them. > + * states must not suffer spurious wakeups, except them. This should be exempt, no?
On Tue, Sep 30, 2025 at 09:26:59AM +0100 Christian Loehle wrote: > On 9/29/25 07:12, Jianyun Gao wrote: > > From: "jianyun.gao" <jianyungao89@gmail.com> > > > > The following are some spelling mistakes existing in the scheduler > > module. Just fix it! > > > > slection -> selection > > achitectures -> architectures > > excempt -> except > > incorectly -> incorrectly > > litle -> little > > faireness -> fairness > > condtion -> condition > > > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > > --- > > V2: > > Delete the incorrect modifications for "borken" in V1. > > The previous version is here: > > > > https://lore.kernel.org/lkml/20250926092832.1457477-1-jianyungao89@gmail.com/ > > > > kernel/sched/core.c | 2 +- > > kernel/sched/cputime.c | 2 +- > > kernel/sched/fair.c | 8 ++++---- > > kernel/sched/wait_bit.c | 2 +- > > 4 files changed, 7 insertions(+), 7 deletions(-) > > > > diff --git a/kernel/sched/core.c b/kernel/sched/core.c > > index 7f1e5cb94c53..af5076e40567 100644 > > --- a/kernel/sched/core.c > > +++ b/kernel/sched/core.c > > @@ -6858,7 +6858,7 @@ static void __sched notrace __schedule(int sched_mode) > > /* > > * We pass task_is_blocked() as the should_block arg > > * in order to keep mutex-blocked tasks on the runqueue > > - * for slection with proxy-exec (without proxy-exec > > + * for selection with proxy-exec (without proxy-exec > > * task_is_blocked() will always be false). > > */ > > try_to_block_task(rq, prev, &prev_state, > > diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c > > index 7097de2c8cda..2429be5a5e40 100644 > > --- a/kernel/sched/cputime.c > > +++ b/kernel/sched/cputime.c > > @@ -585,7 +585,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, > > stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); > > /* > > * Because mul_u64_u64_div_u64() can approximate on some > > - * achitectures; enforce the constraint that: a*b/(b+c) <= a. 
> > + * architectures; enforce the constraint that: a*b/(b+c) <= a. > > */ > > if (unlikely(stime > rtime)) > > stime = rtime; > > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c > > index 18a30ae35441..20fe5899b247 100644 > > --- a/kernel/sched/fair.c > > +++ b/kernel/sched/fair.c > > @@ -5381,7 +5381,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) > > bool delay = sleep; > > /* > > * DELAY_DEQUEUE relies on spurious wakeups, special task > > - * states must not suffer spurious wakeups, excempt them. > > + * states must not suffer spurious wakeups, except them. > > This should be exempt, no? > I had the same thought then decded that "except" as a verb worked too. We are making an exception for the special states, right? I think either works, but not both at once :) But that said, I'm not sure we should bother as these don't seem to effect the meaning (at least to me as a native 'merican speaker). Cheers, Phil --
Hi Phil, I agree with you. As a non-native English speaker, I think both of them(except or exempt) are acceptable. So, do I still need to change "except" to "exempt"? On Tue, Sep 30, 2025 at 8:30 PM Phil Auld <pauld@redhat.com> wrote: > > On Tue, Sep 30, 2025 at 09:26:59AM +0100 Christian Loehle wrote: > > On 9/29/25 07:12, Jianyun Gao wrote: > > > From: "jianyun.gao" <jianyungao89@gmail.com> > > > > > > The following are some spelling mistakes existing in the scheduler > > > module. Just fix it! > > > > > > slection -> selection > > > achitectures -> architectures > > > excempt -> except > > > incorectly -> incorrectly > > > litle -> little > > > faireness -> fairness > > > condtion -> condition > > > > > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > > > --- > > > V2: > > > Delete the incorrect modifications for "borken" in V1. > > > The previous version is here: > > > > > > https://lore.kernel.org/lkml/20250926092832.1457477-1-jianyungao89@gmail.com/ > > > > > > kernel/sched/core.c | 2 +- > > > kernel/sched/cputime.c | 2 +- > > > kernel/sched/fair.c | 8 ++++---- > > > kernel/sched/wait_bit.c | 2 +- > > > 4 files changed, 7 insertions(+), 7 deletions(-) > > > > > > diff --git a/kernel/sched/core.c b/kernel/sched/core.c > > > index 7f1e5cb94c53..af5076e40567 100644 > > > --- a/kernel/sched/core.c > > > +++ b/kernel/sched/core.c > > > @@ -6858,7 +6858,7 @@ static void __sched notrace __schedule(int sched_mode) > > > /* > > > * We pass task_is_blocked() as the should_block arg > > > * in order to keep mutex-blocked tasks on the runqueue > > > - * for slection with proxy-exec (without proxy-exec > > > + * for selection with proxy-exec (without proxy-exec > > > * task_is_blocked() will always be false). 
> > > */ > > > try_to_block_task(rq, prev, &prev_state, > > > diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c > > > index 7097de2c8cda..2429be5a5e40 100644 > > > --- a/kernel/sched/cputime.c > > > +++ b/kernel/sched/cputime.c > > > @@ -585,7 +585,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, > > > stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); > > > /* > > > * Because mul_u64_u64_div_u64() can approximate on some > > > - * achitectures; enforce the constraint that: a*b/(b+c) <= a. > > > + * architectures; enforce the constraint that: a*b/(b+c) <= a. > > > */ > > > if (unlikely(stime > rtime)) > > > stime = rtime; > > > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c > > > index 18a30ae35441..20fe5899b247 100644 > > > --- a/kernel/sched/fair.c > > > +++ b/kernel/sched/fair.c > > > @@ -5381,7 +5381,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) > > > bool delay = sleep; > > > /* > > > * DELAY_DEQUEUE relies on spurious wakeups, special task > > > - * states must not suffer spurious wakeups, excempt them. > > > + * states must not suffer spurious wakeups, except them. > > > > This should be exempt, no? > > > > I had the same thought then decded that "except" as a verb worked too. > We are making an exception for the special states, right? I think either > works, but not both at once :) > > But that said, I'm not sure we should bother as these don't seem to > effect the meaning (at least to me as a native 'merican speaker). > > Cheers, > Phil > > > > -- >
Oh, yes. You are right. I will fix that in the next version. Thank you very much! On Tue, Sep 30, 2025 at 4:27 PM Christian Loehle <christian.loehle@arm.com> wrote: > > On 9/29/25 07:12, Jianyun Gao wrote: > > From: "jianyun.gao" <jianyungao89@gmail.com> > > > > The following are some spelling mistakes existing in the scheduler > > module. Just fix it! > > > > slection -> selection > > achitectures -> architectures > > excempt -> except > > incorectly -> incorrectly > > litle -> little > > faireness -> fairness > > condtion -> condition > > > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > > --- > > V2: > > Delete the incorrect modifications for "borken" in V1. > > The previous version is here: > > > > https://lore.kernel.org/lkml/20250926092832.1457477-1-jianyungao89@gmail.com/ > > > > kernel/sched/core.c | 2 +- > > kernel/sched/cputime.c | 2 +- > > kernel/sched/fair.c | 8 ++++---- > > kernel/sched/wait_bit.c | 2 +- > > 4 files changed, 7 insertions(+), 7 deletions(-) > > > > diff --git a/kernel/sched/core.c b/kernel/sched/core.c > > index 7f1e5cb94c53..af5076e40567 100644 > > --- a/kernel/sched/core.c > > +++ b/kernel/sched/core.c > > @@ -6858,7 +6858,7 @@ static void __sched notrace __schedule(int sched_mode) > > /* > > * We pass task_is_blocked() as the should_block arg > > * in order to keep mutex-blocked tasks on the runqueue > > - * for slection with proxy-exec (without proxy-exec > > + * for selection with proxy-exec (without proxy-exec > > * task_is_blocked() will always be false). 
> > */ > > try_to_block_task(rq, prev, &prev_state, > > diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c > > index 7097de2c8cda..2429be5a5e40 100644 > > --- a/kernel/sched/cputime.c > > +++ b/kernel/sched/cputime.c > > @@ -585,7 +585,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, > > stime = mul_u64_u64_div_u64(stime, rtime, stime + utime); > > /* > > * Because mul_u64_u64_div_u64() can approximate on some > > - * achitectures; enforce the constraint that: a*b/(b+c) <= a. > > + * architectures; enforce the constraint that: a*b/(b+c) <= a. > > */ > > if (unlikely(stime > rtime)) > > stime = rtime; > > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c > > index 18a30ae35441..20fe5899b247 100644 > > --- a/kernel/sched/fair.c > > +++ b/kernel/sched/fair.c > > @@ -5381,7 +5381,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) > > bool delay = sleep; > > /* > > * DELAY_DEQUEUE relies on spurious wakeups, special task > > - * states must not suffer spurious wakeups, excempt them. > > + * states must not suffer spurious wakeups, except them. > > This should be exempt, no?
© 2016 - 2025 Red Hat, Inc.