[PATCH v4 5/5] xen/cpupool: make per-cpupool sched-gran hypfs node writable

Posted by Juergen Gross 5 years ago
Make /cpupool/<id>/sched-gran in hypfs writable. This will enable
per-cpupool selectable scheduling granularity.

Writing this node is allowed only while no cpu is assigned to the cpupool.
Allowed values are "cpu", "core" and "socket".
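
For example, with this patch applied the granularity of a (still empty)
cpupool can be set from dom0 via the xenhypfs tool; a sketch only, with
cpupool id 1 chosen purely for illustration:

  # xenhypfs write /cpupool/1/sched-gran core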

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- test user parameters earlier (Jan Beulich)

V3:
- fix build without CONFIG_HYPFS on Arm (Andrew Cooper)
---
 docs/misc/hypfs-paths.pandoc |  5 ++-
 xen/common/sched/cpupool.c   | 70 ++++++++++++++++++++++++++++++------
 2 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/docs/misc/hypfs-paths.pandoc b/docs/misc/hypfs-paths.pandoc
index f1ce24d7fe..e86f7d0dbe 100644
--- a/docs/misc/hypfs-paths.pandoc
+++ b/docs/misc/hypfs-paths.pandoc
@@ -184,10 +184,13 @@ A directory of all current cpupools.
 The individual cpupools. Each entry is a directory with the name being the
 cpupool-id (e.g. /cpupool/0/).
 
-#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket")
+#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket") [w]
 
 The scheduling granularity of a cpupool.
 
+Writing a value is allowed only for cpupools with no cpu assigned and if the
+architecture supports different scheduling granularities.
+
 #### /params/
 
 A directory of runtime parameters.
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index e2011367bd..acd26f9449 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -77,7 +77,7 @@ static void sched_gran_print(enum sched_gran mode, unsigned int gran)
 }
 
 #ifdef CONFIG_HAS_SCHED_GRANULARITY
-static int __init sched_select_granularity(const char *str)
+static int sched_gran_get(const char *str, enum sched_gran *mode)
 {
     unsigned int i;
 
@@ -85,36 +85,43 @@ static int __init sched_select_granularity(const char *str)
     {
         if ( strcmp(sg_name[i].name, str) == 0 )
         {
-            opt_sched_granularity = sg_name[i].mode;
+            *mode = sg_name[i].mode;
             return 0;
         }
     }
 
     return -EINVAL;
 }
+
+static int __init sched_select_granularity(const char *str)
+{
+    return sched_gran_get(str, &opt_sched_granularity);
+}
 custom_param("sched-gran", sched_select_granularity);
+#elif CONFIG_HYPFS
+static int sched_gran_get(const char *str, enum sched_gran *mode)
+{
+    return -EINVAL;
+}
 #endif
 
-static unsigned int __init cpupool_check_granularity(void)
+static unsigned int cpupool_check_granularity(enum sched_gran mode)
 {
     unsigned int cpu;
     unsigned int siblings, gran = 0;
 
-    if ( opt_sched_granularity == SCHED_GRAN_cpu )
+    if ( mode == SCHED_GRAN_cpu )
         return 1;
 
     for_each_online_cpu ( cpu )
     {
-        siblings = cpumask_weight(sched_get_opt_cpumask(opt_sched_granularity,
-                                                        cpu));
+        siblings = cpumask_weight(sched_get_opt_cpumask(mode, cpu));
         if ( gran == 0 )
             gran = siblings;
         else if ( gran != siblings )
             return 0;
     }
 
-    sched_disable_smt_switching = true;
-
     return gran;
 }
 
@@ -126,7 +133,7 @@ static void __init cpupool_gran_init(void)
 
     while ( gran == 0 )
     {
-        gran = cpupool_check_granularity();
+        gran = cpupool_check_granularity(opt_sched_granularity);
 
         if ( gran == 0 )
         {
@@ -152,6 +159,9 @@ static void __init cpupool_gran_init(void)
     if ( fallback )
         warning_add(fallback);
 
+    if ( opt_sched_granularity != SCHED_GRAN_cpu )
+        sched_disable_smt_switching = true;
+
     sched_granularity = gran;
     sched_gran_print(opt_sched_granularity, sched_granularity);
 }
@@ -1126,17 +1136,55 @@ static unsigned int hypfs_gran_getsize(const struct hypfs_entry *entry)
     return strlen(gran) + 1;
 }
 
+static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
+                              XEN_GUEST_HANDLE_PARAM(const_void) uaddr,
+                              unsigned int ulen)
+{
+    const struct hypfs_dyndir_id *data;
+    struct cpupool *cpupool;
+    enum sched_gran gran;
+    unsigned int sched_gran = 0;
+    char name[SCHED_GRAN_NAME_LEN];
+    int ret = 0;
+
+    if ( ulen > SCHED_GRAN_NAME_LEN )
+        return -ENOSPC;
+
+    if ( copy_from_guest(name, uaddr, ulen) )
+        return -EFAULT;
+
+    if ( memchr(name, 0, ulen) == (name + ulen - 1) )
+        sched_gran = sched_gran_get(name, &gran) ?
+                     0 : cpupool_check_granularity(gran);
+    if ( sched_gran == 0 )
+        return -EINVAL;
+
+    data = hypfs_get_dyndata();
+    cpupool = data->data;
+    ASSERT(cpupool);
+
+    if ( !cpumask_empty(cpupool->cpu_valid) )
+        ret = -EBUSY;
+    else
+    {
+        cpupool->gran = gran;
+        cpupool->sched_gran = sched_gran;
+    }
+
+    return ret;
+}
+
 static const struct hypfs_funcs cpupool_gran_funcs = {
     .enter = hypfs_node_enter,
     .exit = hypfs_node_exit,
     .read = cpupool_gran_read,
-    .write = hypfs_write_deny,
+    .write = cpupool_gran_write,
     .getsize = hypfs_gran_getsize,
     .findentry = hypfs_leaf_findentry,
 };
 
 static HYPFS_VARSIZE_INIT(cpupool_gran, XEN_HYPFS_TYPE_STRING, "sched-gran",
-                          0, &cpupool_gran_funcs);
+                          SCHED_GRAN_NAME_LEN, &cpupool_gran_funcs);
 static char granstr[SCHED_GRAN_NAME_LEN] = {
     [0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
     [SCHED_GRAN_NAME_LEN - 1] = 0
-- 
2.26.2


Re: [PATCH v4 5/5] xen/cpupool: make per-cpupool sched-gran hypfs node writable
Posted by Dario Faggioli 5 years ago
On Mon, 2021-01-18 at 12:55 +0100, Juergen Gross wrote:
> Make /cpupool/<id>/sched-gran in hypfs writable. This will enable
> per-cpupool selectable scheduling granularity.
> 
> Writing this node is allowed only while no cpu is assigned to the
> cpupool. Allowed values are "cpu", "core" and "socket".
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

This holds with Jan's proposed adjustments.

Regards
-- 
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE https://www.suse.com/
-------------------------------------------------------------------
<<This happens because _I_ choose it to happen!>> (Raistlin Majere)
Re: [PATCH v4 5/5] xen/cpupool: make per-cpupool sched-gran hypfs node writable
Posted by Jan Beulich 5 years ago
On 18.01.2021 12:55, Juergen Gross wrote:
> Make /cpupool/<id>/sched-gran in hypfs writable. This will enable
> per-cpupool selectable scheduling granularity.
> 
> Writing this node is allowed only while no cpu is assigned to the cpupool.
> Allowed values are "cpu", "core" and "socket".
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
with two small adjustment requests:

> @@ -85,36 +85,43 @@ static int __init sched_select_granularity(const char *str)
>      {
>          if ( strcmp(sg_name[i].name, str) == 0 )
>          {
> -            opt_sched_granularity = sg_name[i].mode;
> +            *mode = sg_name[i].mode;
>              return 0;
>          }
>      }
>  
>      return -EINVAL;
>  }
> +
> +static int __init sched_select_granularity(const char *str)
> +{
> +    return sched_gran_get(str, &opt_sched_granularity);
> +}
>  custom_param("sched-gran", sched_select_granularity);
> +#elif CONFIG_HYPFS

Missing defined().
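
I.e. the line would presumably become:

    #elif defined(CONFIG_HYPFS)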

> @@ -1126,17 +1136,55 @@ static unsigned int hypfs_gran_getsize(const struct hypfs_entry *entry)
>      return strlen(gran) + 1;
>  }
>  
> +static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
> +                              XEN_GUEST_HANDLE_PARAM(const_void) uaddr,
> +                              unsigned int ulen)
> +{
> +    const struct hypfs_dyndir_id *data;
> +    struct cpupool *cpupool;
> +    enum sched_gran gran;
> +    unsigned int sched_gran = 0;
> +    char name[SCHED_GRAN_NAME_LEN];
> +    int ret = 0;
> +
> +    if ( ulen > SCHED_GRAN_NAME_LEN )
> +        return -ENOSPC;
> +
> +    if ( copy_from_guest(name, uaddr, ulen) )
> +        return -EFAULT;
> +
> +    if ( memchr(name, 0, ulen) == (name + ulen - 1) )
> +        sched_gran = sched_gran_get(name, &gran) ?
> +                     0 : cpupool_check_granularity(gran);
> +    if ( sched_gran == 0 )
> +        return -EINVAL;
> +
> +    data = hypfs_get_dyndata();
> +    cpupool = data->data;
> +    ASSERT(cpupool);
> +
> +    if ( !cpumask_empty(cpupool->cpu_valid) )
> +        ret = -EBUSY;
> +    else
> +    {
> +        cpupool->gran = gran;
> +        cpupool->sched_gran = sched_gran;
> +    }

I think this could do with a comment clarifying what lock this
is protected by, as the function itself has no sign of any
locking, not even an assertion that a certain lock is being held.
If you were to suggest some text, this as well as the minor issue
above could likely be taken care of while committing.

Jan

Re: [PATCH v4 5/5] xen/cpupool: make per-cpupool sched-gran hypfs node writable
Posted by Jürgen Groß 5 years ago
On 21.01.21 16:55, Jan Beulich wrote:
> On 18.01.2021 12:55, Juergen Gross wrote:
>> Make /cpupool/<id>/sched-gran in hypfs writable. This will enable
>> per-cpupool selectable scheduling granularity.
>>
>> Writing this node is allowed only while no cpu is assigned to the cpupool.
>> Allowed values are "cpu", "core" and "socket".
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
> 
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> with two small adjustment requests:
> 
>> @@ -85,36 +85,43 @@ static int __init sched_select_granularity(const char *str)
>>       {
>>           if ( strcmp(sg_name[i].name, str) == 0 )
>>           {
>> -            opt_sched_granularity = sg_name[i].mode;
>> +            *mode = sg_name[i].mode;
>>               return 0;
>>           }
>>       }
>>   
>>       return -EINVAL;
>>   }
>> +
>> +static int __init sched_select_granularity(const char *str)
>> +{
>> +    return sched_gran_get(str, &opt_sched_granularity);
>> +}
>>   custom_param("sched-gran", sched_select_granularity);
>> +#elif CONFIG_HYPFS
> 
> Missing defined().
> 
>> @@ -1126,17 +1136,55 @@ static unsigned int hypfs_gran_getsize(const struct hypfs_entry *entry)
>>       return strlen(gran) + 1;
>>   }
>>   
>> +static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
>> +                              XEN_GUEST_HANDLE_PARAM(const_void) uaddr,
>> +                              unsigned int ulen)
>> +{
>> +    const struct hypfs_dyndir_id *data;
>> +    struct cpupool *cpupool;
>> +    enum sched_gran gran;
>> +    unsigned int sched_gran = 0;
>> +    char name[SCHED_GRAN_NAME_LEN];
>> +    int ret = 0;
>> +
>> +    if ( ulen > SCHED_GRAN_NAME_LEN )
>> +        return -ENOSPC;
>> +
>> +    if ( copy_from_guest(name, uaddr, ulen) )
>> +        return -EFAULT;
>> +
>> +    if ( memchr(name, 0, ulen) == (name + ulen - 1) )
>> +        sched_gran = sched_gran_get(name, &gran) ?
>> +                     0 : cpupool_check_granularity(gran);
>> +    if ( sched_gran == 0 )
>> +        return -EINVAL;
>> +
>> +    data = hypfs_get_dyndata();
>> +    cpupool = data->data;
>> +    ASSERT(cpupool);
>> +
>> +    if ( !cpumask_empty(cpupool->cpu_valid) )
>> +        ret = -EBUSY;
>> +    else
>> +    {
>> +        cpupool->gran = gran;
>> +        cpupool->sched_gran = sched_gran;
>> +    }
> 
> I think this could do with a comment clarifying what lock this
> is protected by, as the function itself has no sign of any
> locking, not even an assertion that a certain lock is being held.
> If you were to suggest some text, this as well as the minor issue
> above could likely be taken care of while committing.

cpupool_gran_[read|write]() are both guarded by the cpupool_lock
taken via cpupool_dir_enter().
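
So a comment (and, using spin_is_locked(), an assertion) along these
lines could be added above the cpu_valid test; just a suggested sketch,
the exact wording would be up to the committer:

    /* Protected by cpupool_lock, taken in cpupool_dir_enter(). */
    ASSERT(spin_is_locked(&cpupool_lock));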


Juergen