Currently it might not be obvious which scheduling mode (e.g. core-
scheduling) the scheduler is using. Alleviate this by printing
additional information about the selected granularity per-cpupool.
Note: per-cpupool granularity selection is not implemented yet. Every
cpupool gets its granularity from the single global value.
Take this opportunity to introduce a struct sched_gran_name array and
refactor sched_select_granularity() to use it.
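As an illustration (hypothetical values: a host booted with
"sched-gran=core" and 2 threads per core), the cpupool dump would then
contain lines like:
    Cpupool 0:
    Cpus: 0-7
    Scheduling granularity: core, 2 CPUs per sched-resource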
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
v4:
- use char[8]
v3:
- use const char*
- use sched_gran_name array instead of switch
- updated commit message
v2:
- print information on a separate line
- use per-cpupool granularity
- updated commit message
CC: Juergen Gross <jgross@suse.com>
CC: Dario Faggioli <dfaggioli@suse.com>
CC: George Dunlap <george.dunlap@citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
---
xen/common/sched/cpupool.c | 51 +++++++++++++++++++++++++++++++-------
1 file changed, 42 insertions(+), 9 deletions(-)
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index d40345b585..97c2d5b3c1 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -40,19 +40,50 @@ static DEFINE_SPINLOCK(cpupool_lock);
static enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
static unsigned int __read_mostly sched_granularity = 1;
+struct sched_gran_name {
+ enum sched_gran mode;
+ char name[8];
+};
+
+static const struct sched_gran_name sg_name[] = {
+ {SCHED_GRAN_cpu, "cpu"},
+ {SCHED_GRAN_core, "core"},
+ {SCHED_GRAN_socket, "socket"},
+};
+
+static void sched_gran_print(enum sched_gran mode, unsigned int gran)
+{
+ const char *name = "";
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(sg_name); i++ )
+ {
+ if ( mode == sg_name[i].mode )
+ {
+ name = sg_name[i].name;
+ break;
+ }
+ }
+
+ printk("Scheduling granularity: %s, %u CPU%s per sched-resource\n",
+ name, gran, gran == 1 ? "" : "s");
+}
+
#ifdef CONFIG_HAS_SCHED_GRANULARITY
static int __init sched_select_granularity(const char *str)
{
- if ( strcmp("cpu", str) == 0 )
- opt_sched_granularity = SCHED_GRAN_cpu;
- else if ( strcmp("core", str) == 0 )
- opt_sched_granularity = SCHED_GRAN_core;
- else if ( strcmp("socket", str) == 0 )
- opt_sched_granularity = SCHED_GRAN_socket;
- else
- return -EINVAL;
+ unsigned int i;
- return 0;
+ for ( i = 0; i < ARRAY_SIZE(sg_name); i++ )
+ {
+ if ( strcmp(sg_name[i].name, str) == 0 )
+ {
+ opt_sched_granularity = sg_name[i].mode;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
custom_param("sched-gran", sched_select_granularity);
#endif
@@ -115,6 +146,7 @@ static void __init cpupool_gran_init(void)
warning_add(fallback);
sched_granularity = gran;
+ sched_gran_print(opt_sched_granularity, sched_granularity);
}
unsigned int cpupool_get_granularity(const struct cpupool *c)
@@ -911,6 +943,7 @@ void dump_runq(unsigned char key)
{
printk("Cpupool %d:\n", (*c)->cpupool_id);
printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
+ sched_gran_print((*c)->gran, cpupool_get_granularity(*c));
schedule_dump(*c);
}
--
2.17.1
On Wed, 2020-05-06 at 11:00 +0100, Sergey Dyasli wrote:
> Currently it might be not obvious which scheduling mode (e.g. core-
> scheduling) is being used by the scheduler. Alleviate this by
> printing
> additional information about the selected granularity per-cpupool.
>
> Note: per-cpupool granularity selection is not implemented yet. Every
> cpupool gets its granularity from the single global value.
>
> Take this opportunity to introduce struct sched_gran_name array and
> refactor sched_select_granularity().
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
>
Acked-by: Dario Faggioli <dfaggioli@suse.com>

Thanks and Regards
--
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
-------------------------------------------------------------------
<<This happens because _I_ choose it to happen!>> (Raistlin Majere)