In preparation for storing two separate iowait states in rq->nr_iowait,
bump it from a 32-bit to a 64-bit type on 64-bit kernels.
Note that on 32-bit, the number of tasks is limited to 0x8000, which
fits just fine in even half of the existing 32-bit atomic_t. On 64-bit,
no such limit exists, hence play it safe and make it a 64-bit atomic.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
kernel/sched/core.c | 14 +++++++++++++-
kernel/sched/sched.h | 4 ++++
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 977bb08a33d2..6a6c985220b1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3723,17 +3723,29 @@ static inline cpumask_t *alloc_user_cpus_ptr(int node)
static void task_iowait_inc(struct task_struct *p)
{
+#ifdef CONFIG_64BIT
+ atomic_long_inc(&task_rq(p)->nr_iowait);
+#else
atomic_inc(&task_rq(p)->nr_iowait);
+#endif
}
static void task_iowait_dec(struct task_struct *p)
{
+#ifdef CONFIG_64BIT
+ atomic_long_dec(&task_rq(p)->nr_iowait);
+#else
atomic_dec(&task_rq(p)->nr_iowait);
+#endif
}
int rq_iowait(struct rq *rq)
{
+#ifdef CONFIG_64BIT
+ return atomic_long_read(&rq->nr_iowait);
+#else
return atomic_read(&rq->nr_iowait);
+#endif
}
static void
@@ -10065,7 +10077,7 @@ void __init sched_init(void)
#endif
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
- atomic_set(&rq->nr_iowait, 0);
+ atomic_long_set(&rq->nr_iowait, 0);
#ifdef CONFIG_SCHED_CORE
rq->core = rq;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 387f67ddf18a..c2802d066615 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1049,7 +1049,11 @@ struct rq {
u64 clock_idle_copy;
#endif
+#ifdef CONFIG_64BIT
+ atomic_long_t nr_iowait;
+#else
atomic_t nr_iowait;
+#endif
#ifdef CONFIG_SCHED_DEBUG
u64 last_seen_need_resched_ns;
--
2.43.0
On Tue, Apr 16, 2024 at 06:11:19AM -0600, Jens Axboe wrote:
> In preparation for storing two separate iowait states in rq->nr_iowait,
> bump it from a 32-bit to a 64-bit type on 64-bit kernels.
>
> Note that on 32-bit, the number of tasks is limited to 0x8000, which
> fits just fine in even half of the existing 32-bit atomic_t. On 64-bit,
> no such limit exists, hence play it safe and make it a 64-bit atomic.
We still have the tid limit, no? Anyway...
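For reference on the "fits just fine in even half" argument above: the idea
is presumably that the two iowait counts would end up sharing the single
32-bit word on 32-bit kernels, each half bounded by the 0x8000 task limit.
A rough sketch of such a packing, with made-up names and not taken from
this series, might look like:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /*
     * Illustrative only: two iowait counts packed into one 32-bit
     * atomic_t. With at most 0x8000 tasks, neither 16-bit half can
     * overflow into the other.
     */
    #define IOWAIT_LOW_ONE      1U          /* count #1 in bits 0..15  */
    #define IOWAIT_HIGH_ONE     (1U << 16)  /* count #2 in bits 16..31 */

    static inline void iowait_pair_inc(atomic_t *v, bool second)
    {
            atomic_add(second ? IOWAIT_HIGH_ONE : IOWAIT_LOW_ONE, v);
    }

    static inline void iowait_pair_dec(atomic_t *v, bool second)
    {
            atomic_sub(second ? IOWAIT_HIGH_ONE : IOWAIT_LOW_ONE, v);
    }

    static inline unsigned int iowait_pair_read(atomic_t *v, bool second)
    {
            unsigned int val = atomic_read(v);

            return second ? val >> 16 : val & 0xffffU;
    }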
>
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> ---
> kernel/sched/core.c | 14 +++++++++++++-
> kernel/sched/sched.h | 4 ++++
> 2 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 977bb08a33d2..6a6c985220b1 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3723,17 +3723,29 @@ static inline cpumask_t *alloc_user_cpus_ptr(int node)
>
> static void task_iowait_inc(struct task_struct *p)
> {
> +#ifdef CONFIG_64BIT
> + atomic_long_inc(&task_rq(p)->nr_iowait);
> +#else
> atomic_inc(&task_rq(p)->nr_iowait);
> +#endif
> }
>
> static void task_iowait_dec(struct task_struct *p)
> {
> +#ifdef CONFIG_64BIT
> + atomic_long_dec(&task_rq(p)->nr_iowait);
> +#else
> atomic_dec(&task_rq(p)->nr_iowait);
> +#endif
> }
>
> int rq_iowait(struct rq *rq)
> {
> +#ifdef CONFIG_64BIT
> + return atomic_long_read(&rq->nr_iowait);
> +#else
> return atomic_read(&rq->nr_iowait);
> +#endif
> }
>
> static void
> @@ -10065,7 +10077,7 @@ void __init sched_init(void)
> #endif
> #endif /* CONFIG_SMP */
> hrtick_rq_init(rq);
> - atomic_set(&rq->nr_iowait, 0);
> + atomic_long_set(&rq->nr_iowait, 0);
>
This one site lacks the ifdeffery, which seems superfluous anyway, since
long is already 32bit / 64bit like you want. Hmm?
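Spelled out, the ifdef-free variant hinted at here would presumably look
something like the following (untested sketch; same function bodies as the
patch, just with atomic_long_t used unconditionally, since it is 32 bits
wide on 32-bit and 64 bits wide on 64-bit):

    /* sched.h: single declaration, sized by the architecture's long */
            atomic_long_t           nr_iowait;

    /* core.c: atomic_long_* throughout, no CONFIG_64BIT branches */
    static void task_iowait_inc(struct task_struct *p)
    {
            atomic_long_inc(&task_rq(p)->nr_iowait);
    }

    static void task_iowait_dec(struct task_struct *p)
    {
            atomic_long_dec(&task_rq(p)->nr_iowait);
    }

    int rq_iowait(struct rq *rq)
    {
            return atomic_long_read(&rq->nr_iowait);
    }

    /* sched_init(): then matches the unconditional atomic_long_set() above */
            atomic_long_set(&rq->nr_iowait, 0);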
> #ifdef CONFIG_SCHED_CORE
> rq->core = rq;
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 387f67ddf18a..c2802d066615 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1049,7 +1049,11 @@ struct rq {
> u64 clock_idle_copy;
> #endif
>
> +#ifdef CONFIG_64BIT
> + atomic_long_t nr_iowait;
> +#else
> atomic_t nr_iowait;
> +#endif
>
> #ifdef CONFIG_SCHED_DEBUG
> u64 last_seen_need_resched_ns;
> --
> 2.43.0
>