[PATCH v2] sched: Make const-safe
With a modified container_of() that preserves constness, the compiler
finds some pointers which should have been marked as const. task_of()
also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
that cfs_rq_of() can take a const argument. No change to generated code.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
v2: Also update the !FAIR_GROUP_SCHED case
kernel/sched/core.c | 8 +++++---
kernel/sched/fair.c | 16 +++++++++-------
kernel/sched/sched.h | 22 ++++++++++------------
3 files changed, 24 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 25b582b6ee5f..853188cb6c84 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -152,7 +152,7 @@ __read_mostly int scheduler_running;
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
/* kernel prio, less is more */
-static inline int __task_prio(struct task_struct *p)
+static inline int __task_prio(const struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
@@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p)
*/
/* real prio, less is less */
-static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+static inline bool prio_less(const struct task_struct *a,
+ const struct task_struct *b, bool in_fi)
{
int pa = __task_prio(a), pb = __task_prio(b);
@@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
return false;
}
-static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
+static inline bool __sched_core_less(const struct task_struct *a,
+ const struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c36aa54ae071..855470310903 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -468,7 +468,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse)
return NULL;
}
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
+static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{
return se->parent;
}
@@ -595,8 +595,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}
-static inline bool entity_before(struct sched_entity *a,
- struct sched_entity *b)
+static inline bool entity_before(const struct sched_entity *a,
+ const struct sched_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}
@@ -11728,7 +11728,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
/*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/
-static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
+static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
+ bool forceidle)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -11753,11 +11754,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi)
{
struct rq *rq = task_rq(a);
- struct sched_entity *sea = &a->se;
- struct sched_entity *seb = &b->se;
+ const struct sched_entity *sea = &a->se;
+ const struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb;
s64 delta;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 771f8ddb7053..cdf9f248e5bd 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample)
#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
-static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
+static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
@@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
/*
* Tells if entity @a should preempt entity @b.
*/
-static inline bool
-dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
+static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
+ const struct sched_dl_entity *b)
{
return dl_entity_is_special(a) ||
dl_time_before(a->deadline, b->deadline);
@@ -1236,7 +1236,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool fi);
/*
* Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -1415,7 +1416,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
}
/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
return se->cfs_rq;
}
@@ -1428,19 +1429,16 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
#else
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
+#define task_of(_se) container_of(_se, struct task_struct, se)
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
return &task_rq(p)->cfs;
}
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
- struct task_struct *p = task_of(se);
+ const struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);
return &rq->cfs;
--
2.35.1
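
The reason task_of() changes from a static inline to a macro in the
!FAIR_GROUP_SCHED hunk above is that a function prototype pins its
parameter and return types, while a macro's result type tracks whatever
container_of() yields. A minimal standalone sketch of the difference
(the helper names task_of_fn() and vrun() are illustrative, not kernel
code, and the classic non-const-preserving container_of() is used for
brevity):

#include <stddef.h>
#include <stdio.h>

struct sched_entity { unsigned long vruntime; };
struct task_struct { struct sched_entity se; };

/* Classic container_of(): an explicit cast, so no const tracking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * Function form: the prototype forces a non-const parameter, so passing
 * a const sched_entity pointer draws a -Wdiscarded-qualifiers warning.
 */
static inline struct task_struct *task_of_fn(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

/*
 * Macro form: no types of its own. With the classic container_of() it
 * still silently drops const, but once container_of() dispatches on
 * constness (the _Generic version later in this thread), task_of()
 * becomes const-preserving for free.
 */
#define task_of(_se) container_of(_se, struct task_struct, se)

static unsigned long vrun(const struct sched_entity *se)
{
	/* task_of_fn(se) here would warn: it discards 'const'. */
	return task_of(se)->se.vruntime;
}

int main(void)
{
	struct task_struct t = { .se = { .vruntime = 42 } };
	printf("%lu\n", vrun(&t.se));
	return 0;
}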
On Mon, Dec 12, 2022 at 02:49:46PM +0000, Matthew Wilcox (Oracle) wrote:
> With a modified container_of() that preserves constness, the compiler
> finds some pointers which should have been marked as const. task_of()
> also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
> that cfs_rq_of() can take a const argument. No change to generated code.

More const more better I suppose.. Thanks!

Happen to have a sha for the container_of() commit handy?
On Wed, Dec 14, 2022 at 10:11:01AM +0100, Peter Zijlstra wrote:
> On Mon, Dec 12, 2022 at 02:49:46PM +0000, Matthew Wilcox (Oracle) wrote:
> > With a modified container_of() that preserves constness, the compiler
> > finds some pointers which should have been marked as const. task_of()
> > also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
> > that cfs_rq_of() can take a const argument. No change to generated code.
>
> More const more better I suppose.. Thanks!
>
> Happen to have a sha for the container_of() commit handy?

There isn't one yet. Obviously we can't make container_of()
const-preserving until we've fixed all the places which would warn.
The diff I have in my tree looks like this:

diff --git a/include/linux/container_of.h b/include/linux/container_of.h
index 1d898f9158b4..9416e6cc8c88 100644
--- a/include/linux/container_of.h
+++ b/include/linux/container_of.h
@@ -20,7 +20,10 @@
 	static_assert(__same_type(*(ptr), ((type *)0)->member) ||	\
 		__same_type(*(ptr), void),				\
 		"pointer type mismatch in container_of()");		\
-	((type *)(__mptr - offsetof(type, member))); })
+	__mptr -= offsetof(type, member);				\
+	_Generic(ptr,							\
+		const typeof(*(ptr)) *: (const type *)__mptr,		\
+		default: ((type *)__mptr)); })

 /**
  * container_of_const - cast a member of a structure out to the containing

I have all of fs/ and net/ compiling cleanly now. There are a few
places which really need the const-removing properties, and I've made
those call a new macro called container_of_not_const(), but I don't
like that name.
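
Compiled standalone, that _Generic dispatch behaves as advertised. A
minimal sketch under GNU C (statement expressions and typeof), with
made-up struct contents standing in for the real kernel types:

#include <stddef.h>
#include <stdio.h>

/* Same dispatch as the proposed kernel macro, minus the type asserts. */
#define container_of(ptr, type, member) ({				\
	void *__mptr = (void *)(ptr);					\
	__mptr -= offsetof(type, member);				\
	_Generic(ptr,							\
		const typeof(*(ptr)) *: (const type *)__mptr,		\
		default: ((type *)__mptr)); })

struct sched_entity { unsigned long vruntime; };
struct task_struct { int prio; struct sched_entity se; };

static int task_prio(const struct sched_entity *se)
{
	/*
	 * A const argument selects the first association, so the result
	 * is const struct task_struct *; binding it to a plain pointer
	 * would draw a qualifier warning.
	 */
	const struct task_struct *p = container_of(se, struct task_struct, se);

	return p->prio;
}

int main(void)
{
	struct task_struct t = { .prio = 120 };
	printf("prio = %d\n", task_prio(&t.se));
	return 0;
}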
On Wed, Dec 14, 2022 at 06:03:04PM +0000, Matthew Wilcox wrote:
> On Wed, Dec 14, 2022 at 10:11:01AM +0100, Peter Zijlstra wrote:
> > On Mon, Dec 12, 2022 at 02:49:46PM +0000, Matthew Wilcox (Oracle) wrote:
> > > With a modified container_of() that preserves constness, the compiler
> > > finds some pointers which should have been marked as const. task_of()
> > > also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
> > > that cfs_rq_of() can take a const argument. No change to generated code.
> >
> > More const more better I suppose.. Thanks!
> >
> > Happen to have a sha for the container_of() commit handy?
>
> There isn't one yet. Obviously we can't make container_of()
> const-preserving until we've fixed all the places which would warn.
> The diff I have in my tree looks like this:
>
> diff --git a/include/linux/container_of.h b/include/linux/container_of.h
> index 1d898f9158b4..9416e6cc8c88 100644
> --- a/include/linux/container_of.h
> +++ b/include/linux/container_of.h
> @@ -20,7 +20,10 @@
>  	static_assert(__same_type(*(ptr), ((type *)0)->member) ||	\
>  		__same_type(*(ptr), void),				\
>  		"pointer type mismatch in container_of()");		\
> -	((type *)(__mptr - offsetof(type, member))); })
> +	__mptr -= offsetof(type, member);				\
> +	_Generic(ptr,							\
> +		const typeof(*(ptr)) *: (const type *)__mptr,		\
> +		default: ((type *)__mptr)); })
>
>  /**
>   * container_of_const - cast a member of a structure out to the containing
>
> I have all of fs/ and net/ compiling cleanly now. There are a few
> places which really need the const-removing properties, and I've made
> those call a new macro called container_of_not_const(), but I don't
> like that name.

#define const_cast(T, exp) _Generic((exp), const T : (T)(exp), default: (exp))

perhaps? Then one can write something like:

	struct task_struct *p = const_cast(struct task_struct *,
				container_of(node, struct task_struct, run_node));

The repetition is a bit naf, but at least the construct is more
generally useful.

(I really wish there was a qualifier stripping typeof() variant -- and
yes, I know about the _Atomic thing).
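
That suggestion compiles as-is. A minimal usage sketch, with struct node
as a stand-in type:

struct node { int v; };

#define const_cast(T, exp) \
	_Generic((exp), const T: (T)(exp), default: (exp))

int main(void)
{
	struct node n = { .v = 1 };
	const struct node *cp = &n;

	/* The qualifier is stripped only when actually present; a
	 * non-const exp falls through to default unchanged. */
	struct node *p = const_cast(struct node *, cp);

	p->v = 2;
	return 0;
}

One caveat: because the const is pasted textually in front of T, T has
to be spelled as an explicit pointer type; hiding it behind a typedef
would turn const T into a const-qualified pointer rather than a pointer
to const.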
On Wed, Dec 14, 2022 at 06:03:04PM +0000, Matthew Wilcox wrote:
> On Wed, Dec 14, 2022 at 10:11:01AM +0100, Peter Zijlstra wrote:
> > On Mon, Dec 12, 2022 at 02:49:46PM +0000, Matthew Wilcox (Oracle) wrote:
> > > With a modified container_of() that preserves constness, the compiler
> > > finds some pointers which should have been marked as const. task_of()
> > > also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
> > > that cfs_rq_of() can take a const argument. No change to generated code.
> >
> > More const more better I suppose.. Thanks!
> >
> > Happen to have a sha for the container_of() commit handy?
>
> There isn't one yet. Obviously we can't make container_of()
> const-preserving until we've fixed all the places which would warn.
> The diff I have in my tree looks like this:
>
> diff --git a/include/linux/container_of.h b/include/linux/container_of.h
> index 1d898f9158b4..9416e6cc8c88 100644
> --- a/include/linux/container_of.h
> +++ b/include/linux/container_of.h
> @@ -20,7 +20,10 @@
>  	static_assert(__same_type(*(ptr), ((type *)0)->member) ||	\
>  		__same_type(*(ptr), void),				\
>  		"pointer type mismatch in container_of()");		\
> -	((type *)(__mptr - offsetof(type, member))); })
> +	__mptr -= offsetof(type, member);				\
> +	_Generic(ptr,							\
> +		const typeof(*(ptr)) *: (const type *)__mptr,		\
> +		default: ((type *)__mptr)); })

Ah nice. Thanks!
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 904cbab71dda1689d41a240541179f21ff433c40
Gitweb: https://git.kernel.org/tip/904cbab71dda1689d41a240541179f21ff433c40
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 12 Dec 2022 14:49:46 +00:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Tue, 27 Dec 2022 12:52:16 +01:00
sched: Make const-safe
With a modified container_of() that preserves constness, the compiler
finds some pointers which should have been marked as const. task_of()
also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
that cfs_rq_of() can take a const argument. No change to generated code.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221212144946.2657785-1-willy@infradead.org
---
kernel/sched/core.c | 8 +++++---
kernel/sched/fair.c | 16 +++++++++-------
kernel/sched/sched.h | 22 ++++++++++------------
3 files changed, 24 insertions(+), 22 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75830b7..1f3259c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -152,7 +152,7 @@ __read_mostly int scheduler_running;
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
/* kernel prio, less is more */
-static inline int __task_prio(struct task_struct *p)
+static inline int __task_prio(const struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
@@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p)
*/
/* real prio, less is less */
-static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+static inline bool prio_less(const struct task_struct *a,
+ const struct task_struct *b, bool in_fi)
{
int pa = __task_prio(a), pb = __task_prio(b);
@@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
return false;
}
-static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
+static inline bool __sched_core_less(const struct task_struct *a,
+ const struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ea81d48..d0e2a48 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -468,7 +468,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse)
return NULL;
}
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
+static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{
return se->parent;
}
@@ -595,8 +595,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}
-static inline bool entity_before(struct sched_entity *a,
- struct sched_entity *b)
+static inline bool entity_before(const struct sched_entity *a,
+ const struct sched_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}
@@ -11852,7 +11852,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
/*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/
-static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
+static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
+ bool forceidle)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -11877,11 +11878,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi)
{
struct rq *rq = task_rq(a);
- struct sched_entity *sea = &a->se;
- struct sched_entity *seb = &b->se;
+ const struct sched_entity *sea = &a->se;
+ const struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb;
s64 delta;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2d7467..1072502 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample)
#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
-static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
+static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
@@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
/*
* Tells if entity @a should preempt entity @b.
*/
-static inline bool
-dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
+static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
+ const struct sched_dl_entity *b)
{
return dl_entity_is_special(a) ||
dl_time_before(a->deadline, b->deadline);
@@ -1244,7 +1244,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool fi);
/*
* Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -1423,7 +1424,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
}
/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
return se->cfs_rq;
}
@@ -1436,19 +1437,16 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
#else
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
+#define task_of(_se) container_of(_se, struct task_struct, se)
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
return &task_rq(p)->cfs;
}
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
- struct task_struct *p = task_of(se);
+ const struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);
return &rq->cfs;