On Thu, 2024-11-07 at 15:09 +0100, Peter Zijlstra wrote:
> On Thu, Nov 07, 2024 at 03:02:36PM +0100, Mike Galbraith wrote:
> > On Thu, 2024-11-07 at 10:46 +0100, Mike Galbraith wrote:
> > > On Thu, 2024-11-07 at 05:03 +0100, Mike Galbraith wrote:
> > > >
> > > > I built that patch out of curiosity, and yeah, set_next_task_fair()
> > > > finding a cfs_rq->curr ends play time pretty quickly.
> > >
> > > The below improved uptime, and trace_printk() says it's doing the
> > > intended, so I suppose I'll add a feature and see what falls out.
> >
> > From netperf, I got.. number tabulation practice. Three runs of each
> > test with and without produced nothing but variance/noise.
>
> Make it go away then.
>
> If you could write a Changelog for you inspired bit and stick my cleaned
> up version under it, I'd be much obliged.
Salut, much obliged for eyeball relief.
---snip---
Phil Auld (Redhat) reported an fio benchmark regression that was found to
have been caused by the addition of the DELAY_DEQUEUE feature, suggested it
may be related to wakees losing the ability to migrate, and confirmed that
restoring that ability did indeed restore the previous performance.
(de-uglified-a-lot-by)
Reported-by: Phil Auld <pauld@redhat.com>
Fixes: 152e11f6df29 ("sched/fair: Implement delayed dequeue")
Link: https://lore.kernel.org/lkml/20241101124715.GA689589@pauld.westford.csb/
Signed-off-by: Mike Galbraith <efault@gmx.de>
---
kernel/sched/core.c | 48 +++++++++++++++++++++++++++++-------------------
kernel/sched/sched.h | 5 +++++
2 files changed, 34 insertions(+), 19 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3783,28 +3783,38 @@ ttwu_do_activate(struct rq *rq, struct t
*/
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
- struct rq_flags rf;
- struct rq *rq;
- int ret = 0;
-
- rq = __task_rq_lock(p, &rf);
- if (task_on_rq_queued(p)) {
- update_rq_clock(rq);
- if (p->se.sched_delayed)
- enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
- if (!task_on_cpu(rq, p)) {
- /*
- * When on_rq && !on_cpu the task is preempted, see if
- * it should preempt the task that is current now.
- */
- wakeup_preempt(rq, p, wake_flags);
+ CLASS(__task_rq_lock, rq_guard)(p);
+ struct rq *rq = rq_guard.rq;
+
+ if (!task_on_rq_queued(p))
+ return 0;
+
+ update_rq_clock(rq);
+ if (p->se.sched_delayed) {
+ int queue_flags = ENQUEUE_DELAYED | ENQUEUE_NOCLOCK;
+
+ /*
+ * Since sched_delayed means we cannot be current anywhere,
+ * dequeue it here and have it fall through to the
+ * select_task_rq() case further along the ttwu() path.
+ */
+ if (rq->nr_running > 1 && p->nr_cpus_allowed > 1) {
+ dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags);
+ return 0;
}
- ttwu_do_wakeup(p);
- ret = 1;
+
+ enqueue_task(rq, p, queue_flags);
+ }
+ if (!task_on_cpu(rq, p)) {
+ /*
+ * When on_rq && !on_cpu the task is preempted, see if
+ * it should preempt the task that is current now.
+ */
+ wakeup_preempt(rq, p, wake_flags);
}
- __task_rq_unlock(rq, &rf);
+ ttwu_do_wakeup(p);
- return ret;
+ return 1;
}
#ifdef CONFIG_SMP
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1779,6 +1779,11 @@ task_rq_unlock(struct rq *rq, struct tas
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
+DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
+ _T->rq = __task_rq_lock(_T->lock, &_T->rf),
+ __task_rq_unlock(_T->rq, &_T->rf),
+ struct rq *rq; struct rq_flags rf)
+
DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
_T->rq = task_rq_lock(_T->lock, &_T->rf),
task_rq_unlock(_T->rq, _T->lock, &_T->rf),
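
For readers unfamiliar with the guard infrastructure used above:
DEFINE_LOCK_GUARD_1() (include/linux/cleanup.h) generates a small class
whose constructor takes the lock and whose destructor drops it when the
guard variable goes out of scope, which is what lets the rewritten
ttwu_runnable() return early without explicit unlock paths. Very roughly,
a simplified sketch of the generated pieces (not the exact macro
expansion; the real definition differs in detail):

/*
 * Simplified sketch of what DEFINE_LOCK_GUARD_1(__task_rq_lock, ...)
 * provides; the real macro in include/linux/cleanup.h adds more
 * plumbing (conditional guards, guard() helpers, etc.).
 */
typedef struct {
	struct task_struct *lock;	/* the guarded object */
	struct rq *rq;			/* extra state declared by the macro user */
	struct rq_flags rf;
} class___task_rq_lock_t;

static inline class___task_rq_lock_t
class___task_rq_lock_constructor(struct task_struct *p)
{
	class___task_rq_lock_t _T = { .lock = p };

	_T.rq = __task_rq_lock(_T.lock, &_T.rf);	/* lock taken here */
	return _T;
}

static inline void
class___task_rq_lock_destructor(class___task_rq_lock_t *_T)
{
	__task_rq_unlock(_T->rq, &_T->rf);		/* dropped on scope exit */
}

/*
 * CLASS(__task_rq_lock, rq_guard)(p) then declares roughly:
 *
 *	class___task_rq_lock_t rq_guard
 *		__attribute__((cleanup(class___task_rq_lock_destructor))) =
 *		class___task_rq_lock_constructor(p);
 *
 * so every return path in ttwu_runnable() releases the rq lock.
 */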
On Fri, 2024-11-08 at 01:24 +0100, Mike Galbraith wrote:
> On Thu, 2024-11-07 at 15:09 +0100, Peter Zijlstra wrote:
> > Make it go away then.
> >
> > If you could write a Changelog for you inspired bit and stick my cleaned
> > up version under it, I'd be much obliged.
>
> Salut, much obliged for eyeball relief.

Unfortunate change log place holder below aside, I think this patch may
need to be yanked as trading one not readily repeatable regression for
at least one that definitely is, and likely multiple others.

(adds knob)

tbench 8

NO_MIGRATE_DELAYED     3613.49 MB/sec
MIGRATE_DELAYED        3145.59 MB/sec
NO_DELAY_DEQUEUE       3355.42 MB/sec

First line is DELAY_DEQUEUE restoring pre-EEVDF tbench throughput as
I've mentioned it doing, but $subject promptly did away with that and
then some.

I thought I might be able to do away with the reservation like side
effect of DELAY_DEQUEUE by borrowing h_nr_delayed from...

    sched/eevdf: More PELT vs DELAYED_DEQUEUE

...for cgroups free test config, but Q/D poke at idle_cpu() helped not
at all.
On Tue, Nov 12, 2024 at 08:05:04AM +0100 Mike Galbraith wrote:
> Unfortunate change log place holder below aside, I think this patch may
> need to be yanked as trading one not readily repeatable regression for
> at least one that definitely is, and likely multiple others.
>
> (adds knob)

Yes, I was just coming here to reply. I have the results from the first
version of the patch (I don't think the later one fundamentally changed
enough that it will matter but those results are still pending).

Not entirely surprisingly we've traded a ~10% rand write regression for
5-10% rand read regression. This makes sense to me since the reads are
more likely to be synchronous and thus be more buddy-like and benefit
from flipping back and forth on the same cpu.

I'd probably have to take the reads over the writes in such a trade off :)

> tbench 8
>
> NO_MIGRATE_DELAYED     3613.49 MB/sec
> MIGRATE_DELAYED        3145.59 MB/sec
> NO_DELAY_DEQUEUE       3355.42 MB/sec
>
> First line is DELAY_DEQUEUE restoring pre-EEVDF tbench throughput as
> I've mentioned it doing, but $subject promptly did away with that and
> then some.

Yep, that's not pretty.

> I thought I might be able to do away with the reservation like side
> effect of DELAY_DEQUEUE by borrowing h_nr_delayed from...
>
>     sched/eevdf: More PELT vs DELAYED_DEQUEUE
>
> ...for cgroups free test config, but Q/D poke at idle_cpu() helped not
> at all.

I wonder if the last_wakee stuff could be leveraged here (an idle thought,
so to speak). Haven't looked closely enough.

Cheers,
Phil
On Tue, Nov 12, 2024 at 07:41:17AM -0500, Phil Auld wrote:
> Not entirely surprisingly we've traded a ~10% rand write regression for
> 5-10% rand read regression. This makes sense to me since the reads are
> more likely to be synchronous and thus be more buddy-like and benefit
> from flipping back and forth on the same cpu.

OK, so I'm going to make this commit disappear.
On Tue, 2024-11-12 at 07:41 -0500, Phil Auld wrote:
> Not entirely surprisingly we've traded a ~10% rand write regression for
> 5-10% rand read regression. This makes sense to me since the reads are
> more likely to be synchronous and thus be more buddy-like and benefit
> from flipping back and forth on the same cpu.

Ok, that would seem to second "shoot it".

> I'd probably have to take the reads over the writes in such a trade off :)
>
> > tbench 8
> >
> > NO_MIGRATE_DELAYED     3613.49 MB/sec
> > MIGRATE_DELAYED        3145.59 MB/sec
> > NO_DELAY_DEQUEUE       3355.42 MB/sec
> >
> > First line is DELAY_DEQUEUE restoring pre-EEVDF tbench throughput as
> > I've mentioned it doing, but $subject promptly did away with that and
> > then some.
>
> Yep, that's not pretty.

Yeah, not to mention annoying.

I get the "adds bounce cache pain" aspect, but not why pre-EEVDF
wouldn't be just as heavily affected, it having nothing blocking high
frequency migration (the eternal scheduler boogieman:).

Bottom line would appear to be that these survivors should be left
where they ended up, either due to LB or more likely bog standard
prev_cpu locality, for they are part and parcel of a progression.

> > I thought I might be able to do away with the reservation like side
> > effect of DELAY_DEQUEUE by borrowing h_nr_delayed from...
> >
> >     sched/eevdf: More PELT vs DELAYED_DEQUEUE
> >
> > ...for cgroups free test config, but Q/D poke at idle_cpu() helped not
> > at all.

We don't however have to let sched_delayed block SIS though. Rendering
them transparent in idle_cpu() did NOT wreck the progression, so
maaaybe could help your regression.

> I wonder if the last_wakee stuff could be leveraged here (an idle thought,
> so to speak). Haven't looked closely enough.

If you mean heuristics, the less of those we have, the better off we
are.. they _always_ find a way to embed their teeth in your backside.

	-Mike
On Tue, Nov 12, 2024 at 03:23:38PM +0100 Mike Galbraith wrote:
> Ok, that would seem to second "shoot it".

Yes, drop it please, I think.

Thanks!

> We don't however have to let sched_delayed block SIS though. Rendering
> them transparent in idle_cpu() did NOT wreck the progression, so
> maaaybe could help your regression.

You mean something like:

	if (rq->nr_running > rq->h_nr_delayed)
		return 0;

in idle_cpu() instead of the straight rq->nr_running check?

I don't have the h_nr_delayed stuff yet but can look for it. I'm not
sure that will help with the delayees being sticky. But I can try that
if I'm understanding you right.

I'll try to dig into it some more regardless.

> If you mean heuristics, the less of those we have, the better off we
> are.. they _always_ find a way to embed their teeth in your backside.

Sure, I get that. But when you have a trade-off like this, being
"smarter" about when to do the dequeue might help. But yes, that could
go wrong.

I'm not a fan of knobs either, but we could do your patch with the feat
and default it off.

Cheers,
Phil
On Tue, 2024-11-12 at 10:41 -0500, Phil Auld wrote:
> You mean something like:
>
>	if (rq->nr_running > rq->h_nr_delayed)
>		return 0;
>
> in idle_cpu() instead of the straight rq->nr_running check?

Yeah, close enough.

	-Mike
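
For readers following along, that exchange amounts to roughly the
following idle_cpu() variant. This is a sketch only, not a posted patch:
it assumes a per-rq h_nr_delayed count of sched_delayed tasks, which
comes from the separate "sched/eevdf: More PELT vs DELAYED_DEQUEUE" work
referenced earlier and is not part of the stock idle_cpu() at the time
of this thread.

/*
 * Sketch of the discussed idle_cpu() tweak; assumes rq->h_nr_delayed
 * exists (from the in-flight PELT vs DELAYED_DEQUEUE series).
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	/*
	 * Treat a CPU whose only queued tasks are delayed-dequeue
	 * leftovers as idle for wakeup placement purposes.
	 */
	if (rq->nr_running > rq->h_nr_delayed)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}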
On Tue, 2024-11-12 at 17:15 +0100, Mike Galbraith wrote:
> On Tue, 2024-11-12 at 10:41 -0500, Phil Auld wrote:
> > You mean something like:
> >
> >	if (rq->nr_running > rq->h_nr_delayed)
> >		return 0;
> >
> > in idle_cpu() instead of the straight rq->nr_running check?
>
> Yeah, close enough.

The below is all you need.

Watching blockage rate during part of a netperf scaling run without, a
bit over 2/sec was the highest it got, but with, that drops to the same
zero as turning off the feature, so... relevance highly unlikely but
not quite impossible?

---
 kernel/sched/fair.c | 4 ++++
 1 file changed, 4 insertions(+)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9454,11 +9454,15 @@ int can_migrate_task(struct task_struct

 	/*
 	 * We do not migrate tasks that are:
+	 * 0) not runnable (not useful here/now, but are annoying), or
 	 * 1) throttled_lb_pair, or
 	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
 	 * 3) running (obviously), or
 	 * 4) are cache-hot on their current CPU.
 	 */
+	if (p->se.sched_delayed)
+		return 0;
+
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;
On Thu, Nov 14, 2024 at 12:07:03PM +0100 Mike Galbraith wrote:
> The below is all you need.
>
> Watching blockage rate during part of a netperf scaling run without, a
> bit over 2/sec was the highest it got, but with, that drops to the same
> zero as turning off the feature, so... relevance highly unlikely but
> not quite impossible?

I'll give this a try on my issue. This'll be simpler than the other way.

Thanks!

Cheers,
Phil
On Thu, Nov 14, 2024 at 06:28:54AM -0500 Phil Auld wrote:
> On Thu, Nov 14, 2024 at 12:07:03PM +0100 Mike Galbraith wrote:
> > The below is all you need.
> >
> > Watching blockage rate during part of a netperf scaling run without, a
> > bit over 2/sec was the highest it got, but with, that drops to the same
> > zero as turning off the feature, so... relevance highly unlikely but
> > not quite impossible?
>
> I'll give this a try on my issue. This'll be simpler than the other way.

This, below, by itself, did not help and caused a small slowdown on some
other tests. Did this need to be on top of the wakeup change?

Cheers,
Phil
On Tue, 2024-11-19 at 06:30 -0500, Phil Auld wrote:
>
> This, below, by itself, did not help and caused a small slowdown on some
> other tests. Did this need to be on top of the wakeup change?

No, that made a mess.

The numbers said it was quite a reach, no surprise it fell flat.

	-Mike
On Tue, 2024-11-19 at 12:51 +0100, Mike Galbraith wrote:
> No, that made a mess.

Rashly speculating that turning mobile kthread component loose is what
helped your write regression...

You could try adding (p->flags & PF_KTHREAD) to the wakeup patch to
only turn hard working kthreads loose to try to dodge service latency.
Seems unlikely wakeup frequency * instances would combine to shred fio
the way turning tbench loose did.

	-Mike
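
Concretely, the suggestion amounts to something like the following tweak
to the v1 ttwu_runnable() hunk. This is only a sketch of what is being
proposed here, not a posted or tested patch:

	/*
	 * Sketch of the PF_KTHREAD gate suggested above, applied to the
	 * v1 change: only kthread wakees get dequeued for wakeup
	 * migration, everything else keeps its delayed slot.
	 */
	if ((p->flags & PF_KTHREAD) &&
	    rq->nr_running > 1 && p->nr_cpus_allowed > 1) {
		dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags);
		return 0;
	}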
On Wed, Nov 20, 2024 at 07:37:39PM +0100 Mike Galbraith wrote:
> You could try adding (p->flags & PF_KTHREAD) to the wakeup patch to
> only turn hard working kthreads loose to try to dodge service latency.
> Seems unlikely wakeup frequency * instances would combine to shred fio
> the way turning tbench loose did.

Thanks, I'll try that.

Cheers,
Phil
On Thu, Nov 21, 2024 at 06:56:28AM -0500 Phil Auld wrote:
> Thanks, I'll try that.

Also, fwiw, I think there is another report here

  https://lore.kernel.org/lkml/392209D9-5AC6-4FDE-8D84-FB8A82AD9AEF@oracle.com/

which seems to be the same thing but mis-bisected. I've asked them to try
with NO_DELAY_DEQUEUE just to be sure. But it looks like a duck.

Cheers,
Phil
On Thu, Nov 21, 2024 at 07:07:04AM -0500 Phil Auld wrote:
> Also, fwiw, I think there is another report here
>
>   https://lore.kernel.org/lkml/392209D9-5AC6-4FDE-8D84-FB8A82AD9AEF@oracle.com/
>
> which seems to be the same thing but mis-bisected. I've asked them to try
> with NO_DELAY_DEQUEUE just to be sure. But it looks like a duck.

But it does not quack like one. Their issue did not go away with
NO_DELAY_DEQUEUE so something different is causing that one.

Cheers,
Phil
On Thu, 2024-11-21 at 06:56 -0500, Phil Auld wrote:
> On Wed, Nov 20, 2024 at 07:37:39PM +0100 Mike Galbraith wrote:
> > On Tue, 2024-11-19 at 12:51 +0100, Mike Galbraith wrote:
> > > On Tue, 2024-11-19 at 06:30 -0500, Phil Auld wrote:
> > > >
> > > > This, below, by itself, did not help and caused a small slowdown on some
> > > > other tests. Did this need to be on top of the wakeup change?
> > >
> > > No, that made a mess.
> >
> > Rashly speculating that turning mobile kthread component loose is what
> > helped your write regression...
> >
> > You could try adding (p->flags & PF_KTHREAD) to the wakeup patch to
> > only turn hard working kthreads loose to try to dodge service latency.
> > Seems unlikely wakeup frequency * instances would combine to shred fio
> > the way turning tbench loose did.
> >
>
> Thanks, I'll try that.
You may still want to try that, but my box says probably not. Playing
with your write command line, the players I see are pinned kworkers and
mobile fio instances.
Maybe try the below instead. The changelog is obsolete BS unless you
say otherwise, but while twiddled V2 will still migrate tbench a bit,
and per trace_printk() does still let all kinds of stuff wander off to
roll the SIS dice, it does not even scratch the paint of the formerly
obliterated tbench progression.
Question: did wiping off the evil leave any meaningful goodness behind?
---
sched/fair: Dequeue sched_delayed tasks when waking to a busy CPU
Phil Auld (Redhat) reported an fio benchmark regression that was found to
have been caused by the addition of the DELAY_DEQUEUE feature, suggested it
may be related to wakees losing the ability to migrate, and confirmed that
restoring that ability did indeed restore the previous performance.
V2: do not rip buddies apart, convenient on/off switch
Fixes: 152e11f6df29 ("sched/fair: Implement delayed dequeue")
Signed-off-by: Mike Galbraith <efault@gmx.de>
---
kernel/sched/core.c | 51 ++++++++++++++++++++++++++++++------------------
kernel/sched/features.h | 5 ++++
kernel/sched/sched.h | 5 ++++
3 files changed, 42 insertions(+), 19 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3783,28 +3783,41 @@ ttwu_do_activate(struct rq *rq, struct t
*/
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
- struct rq_flags rf;
- struct rq *rq;
- int ret = 0;
-
- rq = __task_rq_lock(p, &rf);
- if (task_on_rq_queued(p)) {
- update_rq_clock(rq);
- if (p->se.sched_delayed)
- enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
- if (!task_on_cpu(rq, p)) {
- /*
- * When on_rq && !on_cpu the task is preempted, see if
- * it should preempt the task that is current now.
- */
- wakeup_preempt(rq, p, wake_flags);
+ CLASS(__task_rq_lock, rq_guard)(p);
+ struct rq *rq = rq_guard.rq;
+
+ if (!task_on_rq_queued(p))
+ return 0;
+
+ update_rq_clock(rq);
+ if (p->se.sched_delayed) {
+ int queue_flags = ENQUEUE_DELAYED | ENQUEUE_NOCLOCK;
+ int dequeue = sched_feat(DEQUEUE_DELAYED);
+
+ /*
+ * Since sched_delayed means we cannot be current anywhere,
+ * dequeue it here and have it fall through to the
+ * select_task_rq() case further along in ttwu() path.
+ * Note: Do not rip buddies apart else chaos follows.
+ */
+ if (dequeue && rq->nr_running > 1 && p->nr_cpus_allowed > 1 &&
+ !(rq->curr->last_wakee == p || p->last_wakee == rq->curr)) {
+ dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags);
+ return 0;
}
- ttwu_do_wakeup(p);
- ret = 1;
+
+ enqueue_task(rq, p, queue_flags);
+ }
+ if (!task_on_cpu(rq, p)) {
+ /*
+ * When on_rq && !on_cpu the task is preempted, see if
+ * it should preempt the task that is current now.
+ */
+ wakeup_preempt(rq, p, wake_flags);
}
- __task_rq_unlock(rq, &rf);
+ ttwu_do_wakeup(p);
- return ret;
+ return 1;
}
#ifdef CONFIG_SMP
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -47,6 +47,11 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
* DELAY_ZERO clips the lag on dequeue (or wakeup) to 0.
*/
SCHED_FEAT(DELAY_DEQUEUE, true)
+/*
+ * Free ONLY non-buddy delayed tasks to wakeup-migrate to avoid taking
+ * an unnecessary latency hit. Rending buddies asunder inflicts harm.
+ */
+SCHED_FEAT(DEQUEUE_DELAYED, true)
SCHED_FEAT(DELAY_ZERO, true)
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1783,6 +1783,11 @@ task_rq_unlock(struct rq *rq, struct tas
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
+DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
+ _T->rq = __task_rq_lock(_T->lock, &_T->rf),
+ __task_rq_unlock(_T->rq, &_T->rf),
+ struct rq *rq; struct rq_flags rf)
+
DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
_T->rq = task_rq_lock(_T->lock, &_T->rf),
task_rq_unlock(_T->rq, _T->lock, &_T->rf),
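
For context on the "do not rip buddies apart" test above: it leans on
the wakee-tracking state that fair.c already maintains on every wakeup.
A simplified excerpt, shown only to illustrate where last_wakee comes
from (the mainline record_wakee() additionally decays wakee_flips over
time):

/*
 * Simplified sketch of fair.c's wakee tracking; the real version also
 * halves wakee_flips once per second via wakee_flip_decay_ts.
 */
static void record_wakee(struct task_struct *p)
{
	if (current->last_wakee != p) {
		current->last_wakee = p;
		current->wakee_flips++;
	}
}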
Hi Mike and Peter,

On Fri, Nov 8, 2024 at 8:28 AM Mike Galbraith <efault@gmx.de> wrote:
> + update_rq_clock(rq);
> + if (p->se.sched_delayed) {
> + int queue_flags = ENQUEUE_DELAYED | ENQUEUE_NOCLOCK;
> +
> + /*
> + * Since sched_delayed means we cannot be current anywhere,
> + * dequeue it here and have it fall through to the
> + * select_task_rq() case further along the ttwu() path.
> + */
> + if (rq->nr_running > 1 && p->nr_cpus_allowed > 1) {

For sched_asym_cpucapacity systems, do we need to consider
task_fits_cpu_capacity there?

> + dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags);
> + return 0;
> + }
On Mon, 2024-11-11 at 10:46 +0800, Xuewen Yan wrote:
> > + /*
> > + * Since sched_delayed means we cannot be current anywhere,
> > + * dequeue it here and have it fall through to the
> > + * select_task_rq() case further along the ttwu() path.
> > + */
> > + if (rq->nr_running > 1 && p->nr_cpus_allowed > 1) {
>
> For sched_asym_cpucapacity systems, do we need to consider
> task_fits_cpu_capacity there?

I don't think so. Wakeup placement logic is what we're deflecting the
wakee toward; this is not the right spot to add any complexity.

	-Mike
On Fri, Nov 08, 2024 at 01:24:35AM +0100 Mike Galbraith wrote:
> On Thu, 2024-11-07 at 15:09 +0100, Peter Zijlstra wrote:
> > Make it go away then.
> >
> > If you could write a Changelog for you inspired bit and stick my cleaned
> > up version under it, I'd be much obliged.
>
> Salut, much obliged for eyeball relief.

Thanks Mike (and Peter).

We have our full perf tests running on Mike's original version of this
patch. Results probably Monday (there's a long queue). We'll see if this
blows up anything else then.

I'll queue up a build with this cleaned up version as well, but those
results will be late next week, probably. At that point maybe some or
all of these:

Suggested-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Phil Auld <pauld@redhat.com>
Tested-by: Jirka Hladky <jhladky@redhat.com>

Cheers,
Phil