 xen/common/sched/core.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)
There are still several instances of cpumask_t on the stack in
scheduling code. Avoid them as far as possible.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/common/sched/core.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 944164d78a..73799c2508 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -1178,7 +1178,6 @@ int cpu_disable_scheduler(unsigned int cpu)
 {
     struct domain *d;
     const struct cpupool *c;
-    cpumask_t online_affinity;
     int ret = 0;

     rcu_read_lock(&sched_res_rculock);
@@ -1196,8 +1195,7 @@ int cpu_disable_scheduler(unsigned int cpu)
             unsigned long flags;
             spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);

-            cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
-            if ( cpumask_empty(&online_affinity) &&
+            if ( !cpumask_intersects(unit->cpu_hard_affinity, c->cpu_valid) &&
                  cpumask_test_cpu(cpu, unit->cpu_hard_affinity) )
             {
                 if ( sched_check_affinity_broken(unit) )
@@ -1336,12 +1334,10 @@ static int vcpu_set_affinity(

 int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
 {
-    cpumask_t online_affinity;
     cpumask_t *online;

     online = VCPU2ONLINE(v);
-    cpumask_and(&online_affinity, affinity, online);
-    if ( cpumask_empty(&online_affinity) )
+    if ( !cpumask_intersects(online, affinity) )
         return -EINVAL;

     return vcpu_set_affinity(v, affinity, v->sched_unit->cpu_hard_affinity);
@@ -2586,11 +2582,11 @@ static void schedule(void)

     if ( gran > 1 )
     {
-        cpumask_t mask;
+        cpumask_t *mask = cpumask_scratch_cpu(cpu);

         prev->rendezvous_in_cnt = gran;
-        cpumask_andnot(&mask, sr->cpus, cpumask_of(cpu));
-        cpumask_raise_softirq(&mask, SCHED_SLAVE_SOFTIRQ);
+        cpumask_andnot(mask, sr->cpus, cpumask_of(cpu));
+        cpumask_raise_softirq(mask, SCHED_SLAVE_SOFTIRQ);

         next = sched_wait_rendezvous_in(prev, &lock, cpu, now);
         if ( !next )
             return;
--
2.16.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On Thu, 2020-01-23 at 10:03 +0100, Juergen Gross wrote:
> There are still several instances of cpumask_t on the stack in
> scheduling code. Avoid them as far as possible.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

Just curious...

> --- a/xen/common/sched/core.c
> +++ b/xen/common/sched/core.c
> @@ -2586,11 +2582,11 @@ static void schedule(void)
>
>      if ( gran > 1 )
>      {
> -        cpumask_t mask;
> +        cpumask_t *mask = cpumask_scratch_cpu(cpu);
>
>          prev->rendezvous_in_cnt = gran;
> -        cpumask_andnot(&mask, sr->cpus, cpumask_of(cpu));
> -        cpumask_raise_softirq(&mask, SCHED_SLAVE_SOFTIRQ);
> +        cpumask_andnot(mask, sr->cpus, cpumask_of(cpu));
> +        cpumask_raise_softirq(mask, SCHED_SLAVE_SOFTIRQ);
>
... why are we keeping the temporary variable (mask) ?

Thanks and Regards
--
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
-------------------------------------------------------------------
<<This happens because _I_ choose it to happen!>> (Raistlin Majere)

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 24.01.20 01:01, Dario Faggioli wrote:
> On Thu, 2020-01-23 at 10:03 +0100, Juergen Gross wrote:
>> There are still several instances of cpumask_t on the stack in
>> scheduling code. Avoid them as far as possible.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>>
> Reviewed-by: Dario Faggioli <dfaggioli@suse.com>
>
> Just curious...
>
>> --- a/xen/common/sched/core.c
>> +++ b/xen/common/sched/core.c
>> @@ -2586,11 +2582,11 @@ static void schedule(void)
>>
>>      if ( gran > 1 )
>>      {
>> -        cpumask_t mask;
>> +        cpumask_t *mask = cpumask_scratch_cpu(cpu);
>>
>>          prev->rendezvous_in_cnt = gran;
>> -        cpumask_andnot(&mask, sr->cpus, cpumask_of(cpu));
>> -        cpumask_raise_softirq(&mask, SCHED_SLAVE_SOFTIRQ);
>> +        cpumask_andnot(mask, sr->cpus, cpumask_of(cpu));
>> +        cpumask_raise_softirq(mask, SCHED_SLAVE_SOFTIRQ);
>>
> ... why are we keeping the temporary variable (mask) ?

per_cpu accesses are more expensive than those to local variables.
mask is used twice.

Juergen

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
© 2016 - 2024 Red Hat, Inc.