Signed-off-by: Atul Kumar Pant <atulpant.linux@gmail.com>
---
kernel/sched/core.c | 2 +-
kernel/sched/cputime.c | 2 +-
kernel/sched/topology.c | 2 +-
kernel/sched/wait_bit.c | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3e5a6bf58..451e30249 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8998,7 +8998,7 @@ void sched_release_group(struct task_group *tg)
* that tg_unthrottle_up() won't add decayed cfs_rq's to it.
*
* We therefore defer calling unregister_fair_sched_group() to
- * sched_unregister_group() which is guarantied to get called only after the
+ * sched_unregister_group() which is guaranteed to get called only after the
* current RCU grace period has expired.
*/
spin_lock_irqsave(&task_group_lock, flags);
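
For context, the deferral this comment describes is the standard call_rcu()
pattern: teardown work is queued behind the current grace period, so no RCU
reader can still see the object when it runs. A minimal sketch with
hypothetical names (not the actual task_group code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_group {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void my_group_free_rcu(struct rcu_head *rhp)
{
	struct my_group *grp = container_of(rhp, struct my_group, rcu);

	/* Guaranteed to run only after the current grace period has
	 * expired, so no reader still holds a reference to grp. */
	kfree(grp);
}

static void my_group_release(struct my_group *grp)
{
	/* Unlink the object under the appropriate lock first, then
	 * defer the free instead of calling it directly. */
	call_rcu(&grp->rcu, my_group_free_rcu);
}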
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 0bed0fa1a..f8ada8239 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -584,7 +584,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
/*
* Because mul_u64_u64_div_u64() can approximate on some
- * achitectures; enforce the constraint that: a*b/(b+c) <= a.
+ * architectures; enforce the constraint that: a*b/(b+c) <= a.
*/
if (unlikely(stime > rtime))
stime = rtime;
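
The constraint holds because stime <= stime + utime, so exact arithmetic
gives stime * rtime / (stime + utime) <= rtime; only an approximating
mul_u64_u64_div_u64() can overshoot, which is what the clamp guards
against. A user-space sketch of the same guard (hypothetical helper,
128-bit math standing in for the exact case; caller ensures
stime + utime != 0):

#include <stdint.h>

static uint64_t scale_stime(uint64_t stime, uint64_t utime, uint64_t rtime)
{
	/* Exact math: a*b/(b+c) <= a with a = rtime, b = stime,
	 * c = utime, since stime/(stime + utime) <= 1. */
	uint64_t scaled = (__uint128_t)stime * rtime / (stime + utime);

	/* Mirror the kernel's guard: an approximating multiply/divide
	 * helper may exceed rtime, so clamp the result. */
	if (scaled > rtime)
		scaled = rtime;
	return scaled;
}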
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 9748a4c8d..a9f4fe81a 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2337,7 +2337,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
if (!cpumask_subset(sched_domain_span(child),
sched_domain_span(sd))) {
- pr_err("BUG: arch topology borken\n");
+ pr_err("BUG: arch topology broken\n");
#ifdef CONFIG_SCHED_DEBUG
pr_err(" the %s domain not a subset of the %s domain\n",
child->name, sd->name);
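
The check being reported here is a containment invariant: every CPU in the
child domain's span must also appear in the parent's. Over a plain
word-sized bitmask the same test is a single expression; a hypothetical
sketch (the kernel's cpumask API handles arbitrary widths):

#include <stdbool.h>
#include <stdint.h>

/* child is a subset of parent iff no bit is set in child that is
 * clear in parent. */
static bool span_subset(uint64_t child, uint64_t parent)
{
	return (child & ~parent) == 0;
}

/* e.g. span_subset(0x0c, 0x0f) is true; span_subset(0x10, 0x0f) is
 * false and would trigger the pr_err() above. */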
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
index b410b61ce..4197f5ed2 100644
--- a/kernel/sched/wait_bit.c
+++ b/kernel/sched/wait_bit.c
@@ -204,7 +204,7 @@ EXPORT_SYMBOL(init_wait_var_entry);
* given variable to change. wait_var_event() can be waiting for an
* arbitrary condition to be true and associates that condition with an
* address. Calling wake_up_var() suggests that the condition has been
- * made true, but does not strictly require the condtion to use the
+ * made true, but does not strictly require the condition to use the
* address given.
*
* The wake-up is sent to tasks in a waitqueue selected by hash from a
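
The contract this comment describes pairs a waiter and a waker on the same
address rather than on the condition itself. A typical usage sketch,
assuming a hypothetical object with a 'done' flag:

/* Waiter: sleep until the condition associated with &work->done
 * becomes true. */
wait_var_event(&work->done, smp_load_acquire(&work->done));

/* Waker: make the condition true first, then wake the hashed
 * waitqueue selected by that address. */
smp_store_release(&work->done, 1);
wake_up_var(&work->done);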
--
2.25.1
On 1/18/25 14:50, Atul Kumar Pant wrote:
Not even a single line in changelog?
Also, did you do this manually or using any tool?
> Signed-off-by: Atul Kumar Pant <atulpant.linux@gmail.com>
> ---
> kernel/sched/core.c | 2 +-
> kernel/sched/cputime.c | 2 +-
> kernel/sched/topology.c | 2 +-
> kernel/sched/wait_bit.c | 2 +-
> 4 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 3e5a6bf58..451e30249 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -8998,7 +8998,7 @@ void sched_release_group(struct task_group *tg)
> * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
> *
> * We therefore defer calling unregister_fair_sched_group() to
> - * sched_unregister_group() which is guarantied to get called only after the
> + * sched_unregister_group() which is guaranteed to get called only after the
> * current RCU grace period has expired.
> */
> spin_lock_irqsave(&task_group_lock, flags);
> diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
> index 0bed0fa1a..f8ada8239 100644
> --- a/kernel/sched/cputime.c
> +++ b/kernel/sched/cputime.c
> @@ -584,7 +584,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
> stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
> /*
> * Because mul_u64_u64_div_u64() can approximate on some
> - * achitectures; enforce the constraint that: a*b/(b+c) <= a.
> + * architectures; enforce the constraint that: a*b/(b+c) <= a.
fine.
> */
> if (unlikely(stime > rtime))
> stime = rtime;
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 9748a4c8d..a9f4fe81a 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -2337,7 +2337,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
>
> if (!cpumask_subset(sched_domain_span(child),
> sched_domain_span(sd))) {
> - pr_err("BUG: arch topology borken\n");
> + pr_err("BUG: arch topology broken\n");
This is intended. Please look up its meaning and see the earlier mails
on lkml.
> #ifdef CONFIG_SCHED_DEBUG
> pr_err(" the %s domain not a subset of the %s domain\n",
> child->name, sd->name);
> diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
> index b410b61ce..4197f5ed2 100644
> --- a/kernel/sched/wait_bit.c
> +++ b/kernel/sched/wait_bit.c
> @@ -204,7 +204,7 @@ EXPORT_SYMBOL(init_wait_var_entry);
> * given variable to change. wait_var_event() can be waiting for an
> * arbitrary condition to be true and associates that condition with an
> * address. Calling wake_up_var() suggests that the condition has been
> - * made true, but does not strictly require the condtion to use the
> + * made true, but does not strictly require the condition to use the
fine.
> * address given.
> *
> * The wake-up is sent to tasks in a waitqueue selected by hash from a
On Mon, Jan 20, 2025 at 09:38:35AM +0530, Shrikanth Hegde wrote:
>
>
> On 1/18/25 14:50, Atul Kumar Pant wrote:
>
> Not even a single line in changelog?
Sorry, missed adding it.
> Also, did you do this manually or using any tool?
I came across a couple of the mistakes while reading the code and
found the other one using the codespell tool.
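For reference, the basic invocation is simply:

	codespell kernel/sched/

which reports suspected misspellings in those files along with
suggested corrections.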
>
>
> > Signed-off-by: Atul Kumar Pant <atulpant.linux@gmail.com>
> > ---
> > kernel/sched/core.c | 2 +-
> > kernel/sched/cputime.c | 2 +-
> > kernel/sched/topology.c | 2 +-
> > kernel/sched/wait_bit.c | 2 +-
> > 4 files changed, 4 insertions(+), 4 deletions(-)
> >
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 3e5a6bf58..451e30249 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -8998,7 +8998,7 @@ void sched_release_group(struct task_group *tg)
> > * that tg_unthrottle_up() won't add decayed cfs_rq's to it.
> > *
> > * We therefore defer calling unregister_fair_sched_group() to
> > - * sched_unregister_group() which is guarantied to get called only after the
> > + * sched_unregister_group() which is guaranteed to get called only after the
> > * current RCU grace period has expired.
> > */
> > spin_lock_irqsave(&task_group_lock, flags);
> > diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
> > index 0bed0fa1a..f8ada8239 100644
> > --- a/kernel/sched/cputime.c
> > +++ b/kernel/sched/cputime.c
> > @@ -584,7 +584,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
> > stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
> > /*
> > * Because mul_u64_u64_div_u64() can approximate on some
> > - * achitectures; enforce the constraint that: a*b/(b+c) <= a.
> > + * architectures; enforce the constraint that: a*b/(b+c) <= a.
>
> fine.
>
> > */
> > if (unlikely(stime > rtime))
> > stime = rtime;
> > diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> > index 9748a4c8d..a9f4fe81a 100644
> > --- a/kernel/sched/topology.c
> > +++ b/kernel/sched/topology.c
> > @@ -2337,7 +2337,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
> > if (!cpumask_subset(sched_domain_span(child),
> > sched_domain_span(sd))) {
> > - pr_err("BUG: arch topology borken\n");
> > + pr_err("BUG: arch topology broken\n");
>
>
> This is intended. Please look up its meaning and see the earlier mails on
> lkml.
Thank you for pointing this out. I removed this change in the second patch.
>
> > #ifdef CONFIG_SCHED_DEBUG
> > pr_err(" the %s domain not a subset of the %s domain\n",
> > child->name, sd->name);
> > diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
> > index b410b61ce..4197f5ed2 100644
> > --- a/kernel/sched/wait_bit.c
> > +++ b/kernel/sched/wait_bit.c
> > @@ -204,7 +204,7 @@ EXPORT_SYMBOL(init_wait_var_entry);
> > * given variable to change. wait_var_event() can be waiting for an
> > * arbitrary condition to be true and associates that condition with an
> > * address. Calling wake_up_var() suggests that the condition has been
> > - * made true, but does not strictly require the condtion to use the
> > + * made true, but does not strictly require the condition to use the
>
> fine.
>
> > * address given.
> > *
> > * The wake-up is sent to tasks in a waitqueue selected by hash from a
>