In preparation for storing two separate iowait states in there, bump the
size from a 32-bit to a 64-bit size, for 64-bit kernels. On 32-bit
archs, the type remains a 32-bit size.
Note that on 32-bit, the number of tasks is limited to 0x8000, which
fits just fine in even half of the existing 32-bit atomic_t. For 64-bit,
no such limit exists, hence play it safe and make it a 64-bit atomic.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
kernel/sched/core.c | 8 ++++----
kernel/sched/sched.h | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9bf1b67818d0..7e04b84dcc55 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3543,17 +3543,17 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
static void task_iowait_inc(struct task_struct *p)
{
- atomic_inc(&task_rq(p)->nr_iowait);
+ atomic_long_inc(&task_rq(p)->nr_iowait);
}
static void task_iowait_dec(struct task_struct *p)
{
- atomic_dec(&task_rq(p)->nr_iowait);
+ atomic_long_dec(&task_rq(p)->nr_iowait);
}
int rq_iowait(struct rq *rq)
{
- return atomic_read(&rq->nr_iowait);
+ return atomic_long_read(&rq->nr_iowait);
}
static void
@@ -8372,7 +8372,7 @@ void __init sched_init(void)
#endif
#endif /* CONFIG_SMP */
hrtick_rq_init(rq);
- atomic_set(&rq->nr_iowait, 0);
+ atomic_long_set(&rq->nr_iowait, 0);
fair_server_init(rq);
#ifdef CONFIG_SCHED_CORE
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b6b3b565bcb1..da2e67621f39 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1082,7 +1082,7 @@ struct rq {
u64 clock_idle_copy;
#endif
- atomic_t nr_iowait;
+ atomic_long_t nr_iowait;
#ifdef CONFIG_SCHED_DEBUG
u64 last_seen_need_resched_ns;
--
2.43.0
On 2024/8/19 23:39, Jens Axboe wrote:
> In preparation for storing two separate iowait states in there, bump the
> size from a 32-bit to a 64-bit size, for 64-bit kernels. On 32-bit
> archs, the type remains a 32-bit size.
>
> Note that on 32-bit, the number of tasks are limited to 0x8000, which
> fits just fine in even half of the existing 32-bit atomic_t. For 64-bit,
> no such limit exists, hence play it safe and make it a 64-bit atomic.
>
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
> ---
> kernel/sched/core.c | 8 ++++----
> kernel/sched/sched.h | 2 +-
> 2 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 9bf1b67818d0..7e04b84dcc55 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3543,17 +3543,17 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
>
> static void task_iowait_inc(struct task_struct *p)
> {
> - atomic_inc(&task_rq(p)->nr_iowait);
> + atomic_long_inc(&task_rq(p)->nr_iowait);
> }
>
> static void task_iowait_dec(struct task_struct *p)
> {
> - atomic_dec(&task_rq(p)->nr_iowait);
> + atomic_long_dec(&task_rq(p)->nr_iowait);
> }
>
> int rq_iowait(struct rq *rq)
> {
> - return atomic_read(&rq->nr_iowait);
> + return atomic_long_read(&rq->nr_iowait);
> }
>
> static void
> @@ -8372,7 +8372,7 @@ void __init sched_init(void)
> #endif
> #endif /* CONFIG_SMP */
> hrtick_rq_init(rq);
> - atomic_set(&rq->nr_iowait, 0);
> + atomic_long_set(&rq->nr_iowait, 0);
> fair_server_init(rq);
>
> #ifdef CONFIG_SCHED_CORE
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index b6b3b565bcb1..da2e67621f39 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1082,7 +1082,7 @@ struct rq {
> u64 clock_idle_copy;
> #endif
>
> - atomic_t nr_iowait;
> +	atomic_long_t		nr_iowait;
>
> #ifdef CONFIG_SCHED_DEBUG
> u64 last_seen_need_resched_ns;
© 2016 - 2026 Red Hat, Inc.