[libvirt] [PATCH v2] vcpupin: add clear feature
Posted by Yi Wang 6 years, 1 month ago
We can't clear the vcpupin settings in the XML once the vcpupin
command has been used, which is inconvenient in some situations,
such as migration to a host with fewer CPUs.

This patch introduces a clear feature, which can clear the vcpupin
setting in the XML using a 'c' option.
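
For illustration, a management application would request this through
the flags argument roughly as follows (sketch only; it assumes this
patch is applied, and hostcpus is a caller-supplied host pCPU count,
e.g. obtained via virNodeGetInfo):

/* Sketch: clear the explicit pinning of vCPU 0 in the persistent
 * config via the VIR_DOMAIN_VCPU_PIN_CLEAR flag added here.  The map
 * contents are ignored on the clear path, but the driver rejects an
 * all-clear map, so set every bit the way virsh's 'c' shorthand does. */
#include <stdlib.h>
#include <string.h>
#include <libvirt/libvirt.h>

static int
clearVcpu0Pinning(virDomainPtr dom, int hostcpus)
{
    int maplen = VIR_CPU_MAPLEN(hostcpus);
    unsigned char *cpumap = malloc(maplen);
    int ret;

    if (!cpumap)
        return -1;
    memset(cpumap, 0xff, maplen);

    ret = virDomainPinVcpuFlags(dom, 0, cpumap, maplen,
                                VIR_DOMAIN_AFFECT_CONFIG |
                                VIR_DOMAIN_VCPU_PIN_CLEAR);
    free(cpumap);
    return ret;
}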

Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
Signed-off-by: Xi Xu <xu.xi8@zte.com.cn>
---
 include/libvirt/libvirt-domain.h | 11 +++++++++++
 src/qemu/qemu_driver.c           | 32 ++++++++++++++++++++++++--------
 tools/virsh-domain.c             |  5 ++++-
 tools/virsh.pod                  |  1 +
 4 files changed, 40 insertions(+), 9 deletions(-)

diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index 4048acf..46f4e77 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -1847,6 +1847,17 @@ int                     virDomainSetVcpusFlags  (virDomainPtr domain,
 int                     virDomainGetVcpusFlags  (virDomainPtr domain,
                                                  unsigned int flags);
 
+/* Flags for controlling virtual CPU pinning.  */
+typedef enum {
+    /* See virDomainModificationImpact for these flags.  */
+    VIR_DOMAIN_VCPU_PIN_CURRENT = VIR_DOMAIN_AFFECT_CURRENT,
+    VIR_DOMAIN_VCPU_PIN_LIVE    = VIR_DOMAIN_AFFECT_LIVE,
+    VIR_DOMAIN_VCPU_PIN_CONFIG  = VIR_DOMAIN_AFFECT_CONFIG,
+
+    /* Additionally, these flags may be bitwise-OR'd in.  */
+    VIR_DOMAIN_VCPU_PIN_CLEAR = (1 << 2), /* Clear vcpus pin info */
+} virDomainVcpuPinFlags;
+
 int                     virDomainPinVcpu        (virDomainPtr domain,
                                                  unsigned int vcpu,
                                                  unsigned char *cpumap,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index bbce5bd..fe1f62f 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5056,7 +5056,8 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
                       int vcpu,
                       virQEMUDriverPtr driver,
                       virQEMUDriverConfigPtr cfg,
-                      virBitmapPtr cpumap)
+                      virBitmapPtr cpumap,
+                      bool clear)
 {
     virBitmapPtr tmpmap = NULL;
     virDomainVcpuDefPtr vcpuinfo;
@@ -5069,6 +5070,7 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
     int eventNparams = 0;
     int eventMaxparams = 0;
     int ret = -1;
+    virBitmapPtr targetMap = NULL;
 
     if (!qemuDomainHasVcpuPids(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
@@ -5083,10 +5085,15 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
         goto cleanup;
     }
 
-    if (!(tmpmap = virBitmapNewCopy(cpumap)))
-        goto cleanup;
+    if (clear) {
+        targetMap = virHostCPUGetOnlineBitmap();
+    } else {
+        targetMap = cpumap;
+        if (!(tmpmap = virBitmapNewCopy(cpumap)))
+            goto cleanup;
+    }
 
-    if (!(str = virBitmapFormat(cpumap)))
+    if (!(str = virBitmapFormat(targetMap)))
         goto cleanup;
 
     if (vcpuinfo->online) {
@@ -5095,11 +5102,11 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
             if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, vcpu,
                                    false, &cgroup_vcpu) < 0)
                 goto cleanup;
-            if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumap) < 0)
+            if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, targetMap) < 0)
                 goto cleanup;
         }
 
-        if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), cpumap) < 0)
+        if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), targetMap) < 0)
             goto cleanup;
     }
 
@@ -5128,6 +5135,8 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
     virCgroupFree(&cgroup_vcpu);
     VIR_FREE(str);
     qemuDomainEventQueue(driver, event);
+    if (clear)
+        virBitmapFree(targetMap);
     return ret;
 }
 
@@ -5148,9 +5157,11 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
     virBitmapPtr pcpumap = NULL;
     virDomainVcpuDefPtr vcpuinfo = NULL;
     virQEMUDriverConfigPtr cfg = NULL;
+    bool clear = false;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
-                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+                  VIR_DOMAIN_AFFECT_CONFIG |
+                  VIR_DOMAIN_VCPU_PIN_CLEAR, -1);
 
     cfg = virQEMUDriverGetConfig(driver);
 
@@ -5166,6 +5177,8 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
     if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
         goto endjob;
 
+    clear = !!(flags & VIR_DOMAIN_VCPU_PIN_CLEAR);
+
     if ((def && def->virtType == VIR_DOMAIN_VIRT_QEMU) ||
         (persistentDef && persistentDef->virtType == VIR_DOMAIN_VIRT_QEMU)) {
         virReportError(VIR_ERR_OPERATION_FAILED, "%s",
@@ -5191,7 +5204,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
     }
 
     if (def &&
-        qemuDomainPinVcpuLive(vm, def, vcpu, driver, cfg, pcpumap) < 0)
+        qemuDomainPinVcpuLive(vm, def, vcpu, driver, cfg, pcpumap, clear) < 0)
         goto endjob;
 
     if (persistentDef) {
@@ -5199,6 +5212,9 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
         vcpuinfo->cpumask = pcpumap;
         pcpumap = NULL;
 
+        if (clear)
+            virBitmapFree(vcpuinfo->cpumask);
+
         ret = virDomainSaveConfig(cfg->configDir, driver->caps, persistentDef);
         goto endjob;
     }
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 5a0e0c1..4981ecc 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6863,7 +6863,7 @@ virshParseCPUList(vshControl *ctl, int *cpumaplen,
     unsigned char *cpumap = NULL;
     virBitmapPtr map = NULL;
 
-    if (cpulist[0] == 'r') {
+    if (cpulist[0] == 'r' || cpulist[0] == 'c') {
         if (!(map = virBitmapNew(maxcpu)))
             return NULL;
         virBitmapSetAll(map);
@@ -6941,6 +6941,9 @@ cmdVcpuPin(vshControl *ctl, const vshCmd *cmd)
         goto cleanup;
     }
 
+    if (STREQ(cpulist, "c"))
+        flags |= VIR_DOMAIN_VCPU_PIN_CLEAR;
+
     /* Pin mode: pinning specified vcpu to specified physical cpus*/
     if (!(cpumap = virshParseCPUList(ctl, &cpumaplen, cpulist, maxcpu)))
         goto cleanup;
diff --git a/tools/virsh.pod b/tools/virsh.pod
index 69cc423..d5a1779 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -2857,6 +2857,7 @@ I<cpulist> is a list of physical CPU numbers. Its syntax is a comma
 separated list and a special markup using '-' and '^' (ex. '0-4', '0-3,^2') can
 also be allowed. The '-' denotes the range and the '^' denotes exclusive.
 For pinning the I<vcpu> to all physical cpus specify 'r' as a I<cpulist>.
+For clearing pinning info, specify 'c' as a I<cpulist>.
 If I<--live> is specified, affect a running guest.
 If I<--config> is specified, affect the next boot of a persistent guest.
 If I<--current> is specified, affect the current guest state.
-- 
1.8.3.1

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2] vcpupin: add clear feature
Posted by Daniel P. Berrangé 6 years, 1 month ago
On Mon, Feb 12, 2018 at 03:54:21AM -0500, Yi Wang wrote:
> We can't clear the vcpupin settings in the XML once the vcpupin
> command has been used, which is inconvenient in some situations,
> such as migration to a host with fewer CPUs.
> 
> This patch introduces a clear feature, which can clear the vcpupin
> setting in the XML using a 'c' option.
> 
> Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
> Signed-off-by: Xi Xu <xu.xi8@zte.com.cn>
> ---
>  include/libvirt/libvirt-domain.h | 11 +++++++++++
>  src/qemu/qemu_driver.c           | 32 ++++++++++++++++++++++++--------

I'm not seeing a good reason for these changes. There's no concept of clearing
CPU pinning - the default is simply "pinned" to all possible CPUs, and the
callers should already be able to request all possible CPUs with the current
API design.
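
For instance, something along these lines with the unmodified API should
already give the vCPU the run of every host CPU (illustrative sketch, not
taken from this patch; hostcpus is assumed to be the host pCPU count,
e.g. from virNodeGetInfo):

/* Sketch of the existing-API route described above: "clearing" by
 * explicitly pinning the vCPU to every host CPU, no new flag involved. */
#include <stdlib.h>
#include <string.h>
#include <libvirt/libvirt.h>

static int
pinVcpuToAllHostCpus(virDomainPtr dom, unsigned int vcpu, int hostcpus)
{
    int maplen = VIR_CPU_MAPLEN(hostcpus);
    unsigned char *cpumap = malloc(maplen);
    int ret;

    if (!cpumap)
        return -1;
    memset(cpumap, 0xff, maplen);   /* every host pCPU allowed */

    ret = virDomainPinVcpuFlags(dom, vcpu, cpumap, maplen,
                                VIR_DOMAIN_AFFECT_LIVE |
                                VIR_DOMAIN_AFFECT_CONFIG);
    free(cpumap);
    return ret;
}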

>  tools/virsh-domain.c             |  5 ++++-
>  tools/virsh.pod                  |  1 +
>  4 files changed, 40 insertions(+), 9 deletions(-)
> 
> diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
> index 4048acf..46f4e77 100644
> --- a/include/libvirt/libvirt-domain.h
> +++ b/include/libvirt/libvirt-domain.h
> @@ -1847,6 +1847,17 @@ int                     virDomainSetVcpusFlags  (virDomainPtr domain,
>  int                     virDomainGetVcpusFlags  (virDomainPtr domain,
>                                                   unsigned int flags);
>  
> +/* Flags for controlling virtual CPU pinning.  */
> +typedef enum {
> +    /* See virDomainModificationImpact for these flags.  */
> +    VIR_DOMAIN_VCPU_PIN_CURRENT = VIR_DOMAIN_AFFECT_CURRENT,
> +    VIR_DOMAIN_VCPU_PIN_LIVE    = VIR_DOMAIN_AFFECT_LIVE,
> +    VIR_DOMAIN_VCPU_PIN_CONFIG  = VIR_DOMAIN_AFFECT_CONFIG,
> +
> +    /* Additionally, these flags may be bitwise-OR'd in.  */
> +    VIR_DOMAIN_VCPU_PIN_CLEAR = (1 << 2), /* Clear vcpus pin info */
> +} virDomainVcpuPinFlags;
> +
>  int                     virDomainPinVcpu        (virDomainPtr domain,
>                                                   unsigned int vcpu,
>                                                   unsigned char *cpumap,
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index bbce5bd..fe1f62f 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -5056,7 +5056,8 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
>                        int vcpu,
>                        virQEMUDriverPtr driver,
>                        virQEMUDriverConfigPtr cfg,
> -                      virBitmapPtr cpumap)
> +                      virBitmapPtr cpumap,
> +                      bool clear)
>  {
>      virBitmapPtr tmpmap = NULL;
>      virDomainVcpuDefPtr vcpuinfo;
> @@ -5069,6 +5070,7 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
>      int eventNparams = 0;
>      int eventMaxparams = 0;
>      int ret = -1;
> +    virBitmapPtr targetMap = NULL;
>  
>      if (!qemuDomainHasVcpuPids(vm)) {
>          virReportError(VIR_ERR_OPERATION_INVALID,
> @@ -5083,10 +5085,15 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
>          goto cleanup;
>      }
>  
> -    if (!(tmpmap = virBitmapNewCopy(cpumap)))
> -        goto cleanup;
> +    if (clear) {
> +        targetMap = virHostCPUGetOnlineBitmap();
> +    } else {
> +        targetMap = cpumap;
> +        if (!(tmpmap = virBitmapNewCopy(cpumap)))
> +            goto cleanup;
> +    }
>  
> -    if (!(str = virBitmapFormat(cpumap)))
> +    if (!(str = virBitmapFormat(targetMap)))
>          goto cleanup;
>  
>      if (vcpuinfo->online) {
> @@ -5095,11 +5102,11 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
>              if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, vcpu,
>                                     false, &cgroup_vcpu) < 0)
>                  goto cleanup;
> -            if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumap) < 0)
> +            if (qemuSetupCgroupCpusetCpus(cgroup_vcpu, targetMap) < 0)
>                  goto cleanup;
>          }
>  
> -        if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), cpumap) < 0)
> +        if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), targetMap) < 0)
>              goto cleanup;
>      }
>  
> @@ -5128,6 +5135,8 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
>      virCgroupFree(&cgroup_vcpu);
>      VIR_FREE(str);
>      qemuDomainEventQueue(driver, event);
> +    if (clear)
> +        virBitmapFree(targetMap);
>      return ret;
>  }
>  
> @@ -5148,9 +5157,11 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
>      virBitmapPtr pcpumap = NULL;
>      virDomainVcpuDefPtr vcpuinfo = NULL;
>      virQEMUDriverConfigPtr cfg = NULL;
> +    bool clear = false;
>  
>      virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
> -                  VIR_DOMAIN_AFFECT_CONFIG, -1);
> +                  VIR_DOMAIN_AFFECT_CONFIG |
> +                  VIR_DOMAIN_VCPU_PIN_CLEAR, -1);
>  
>      cfg = virQEMUDriverGetConfig(driver);
>  
> @@ -5166,6 +5177,8 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
>      if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
>          goto endjob;
>  
> +    clear = !!(flags & VIR_DOMAIN_VCPU_PIN_CLEAR);
> +
>      if ((def && def->virtType == VIR_DOMAIN_VIRT_QEMU) ||
>          (persistentDef && persistentDef->virtType == VIR_DOMAIN_VIRT_QEMU)) {
>          virReportError(VIR_ERR_OPERATION_FAILED, "%s",
> @@ -5191,7 +5204,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
>      }
>  
>      if (def &&
> -        qemuDomainPinVcpuLive(vm, def, vcpu, driver, cfg, pcpumap) < 0)
> +        qemuDomainPinVcpuLive(vm, def, vcpu, driver, cfg, pcpumap, clear) < 0)
>          goto endjob;
>  
>      if (persistentDef) {
> @@ -5199,6 +5212,9 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
>          vcpuinfo->cpumask = pcpumap;
>          pcpumap = NULL;
>  
> +        if (clear)
> +            virBitmapFree(vcpuinfo->cpumask);
> +
>          ret = virDomainSaveConfig(cfg->configDir, driver->caps, persistentDef);
>          goto endjob;
>      }
> diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
> index 5a0e0c1..4981ecc 100644
> --- a/tools/virsh-domain.c
> +++ b/tools/virsh-domain.c
> @@ -6863,7 +6863,7 @@ virshParseCPUList(vshControl *ctl, int *cpumaplen,
>      unsigned char *cpumap = NULL;
>      virBitmapPtr map = NULL;
>  
> -    if (cpulist[0] == 'r') {
> +    if (cpulist[0] == 'r' || cpulist[0] == 'c') {
>          if (!(map = virBitmapNew(maxcpu)))
>              return NULL;
>          virBitmapSetAll(map);
> @@ -6941,6 +6941,9 @@ cmdVcpuPin(vshControl *ctl, const vshCmd *cmd)
>          goto cleanup;
>      }
>  
> +    if (STREQ(cpulist, "c"))
> +        flags |= VIR_DOMAIN_VCPU_PIN_CLEAR;
> +
>      /* Pin mode: pinning specified vcpu to specified physical cpus*/
>      if (!(cpumap = virshParseCPUList(ctl, &cpumaplen, cpulist, maxcpu)))
>          goto cleanup;
> diff --git a/tools/virsh.pod b/tools/virsh.pod
> index 69cc423..d5a1779 100644
> --- a/tools/virsh.pod
> +++ b/tools/virsh.pod
> @@ -2857,6 +2857,7 @@ I<cpulist> is a list of physical CPU numbers. Its syntax is a comma
>  separated list and a special markup using '-' and '^' (ex. '0-4', '0-3,^2') can
>  also be allowed. The '-' denotes the range and the '^' denotes exclusive.
>  For pinning the I<vcpu> to all physical cpus specify 'r' as a I<cpulist>.
> +For clearing pinning info, specify 'c' as a I<cpulist>.
>  If I<--live> is specified, affect a running guest.
>  If I<--config> is specified, affect the next boot of a persistent guest.
>  If I<--current> is specified, affect the current guest state.
> -- 
> 1.8.3.1
> 
> --
> libvir-list mailing list
> libvir-list@redhat.com
> https://www.redhat.com/mailman/listinfo/libvir-list

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2] vcpupin: add clear feature
Posted by Peter Krempa 6 years, 1 month ago
On Mon, Feb 12, 2018 at 09:39:26 +0000, Daniel Berrange wrote:
> On Mon, Feb 12, 2018 at 03:54:21AM -0500, Yi Wang wrote:
> > We can't clear the vcpupin settings in the XML once the vcpupin
> > command has been used, which is inconvenient in some situations,
> > such as migration to a host with fewer CPUs.
> > 
> > This patch introduces a clear feature, which can clear the vcpupin
> > setting in the XML using a 'c' option.
> > 
> > Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
> > Signed-off-by: Xi Xu <xu.xi8@zte.com.cn>
> > ---
> >  include/libvirt/libvirt-domain.h | 11 +++++++++++
> >  src/qemu/qemu_driver.c           | 32 ++++++++++++++++++++++++--------
> 
> I'm not seeing a good reason for these changes. There's no concept of clearing
> CPU pinning - the default is simply "pinned" to all possible CPUs, and the
> callers should already be able to request all possible CPUs with the current
> API design.

Well, the thing is that in some cases the default is not pinned to all
pCPUs, but rather can be taken from other configuration points, e.g.
global VM pinning bitmap.

As such, the current code does not allow restoring that state, since
pinning to all pCPUs might be a desired legitimate configuration; I
removed the hack that deleted the pinning for such a configuration a
long time ago. This means that an explicit removal of the pinning might
be a desired behaviour of the API, since abusing any other value to
restore the default state is not a good idea.
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2] vcpupin: add clear feature
Posted by Daniel P. Berrangé 6 years, 1 month ago
On Mon, Feb 12, 2018 at 02:31:46PM +0100, Peter Krempa wrote:
> On Mon, Feb 12, 2018 at 09:39:26 +0000, Daniel Berrange wrote:
> > On Mon, Feb 12, 2018 at 03:54:21AM -0500, Yi Wang wrote:
> > > We can't clear the vcpupin settings in the XML once the vcpupin
> > > command has been used, which is inconvenient in some situations,
> > > such as migration to a host with fewer CPUs.
> > > 
> > > This patch introduces a clear feature, which can clear the vcpupin
> > > setting in the XML using a 'c' option.
> > > 
> > > Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
> > > Signed-off-by: Xi Xu <xu.xi8@zte.com.cn>
> > > ---
> > >  include/libvirt/libvirt-domain.h | 11 +++++++++++
> > >  src/qemu/qemu_driver.c           | 32 ++++++++++++++++++++++++--------
> > 
> > I'm not seeing a good reason for these changes. There's no concept of clearing
> > CPU pinning - the default is simply "pinned" to all possible CPUs, and the
> > callers should already be able to request all possible CPUs with the current
> > API design.
> 
> Well, the thing is that in some cases the default is not pinned to all
> pCPUs, but rather can be taken from other configuration points, e.g.
> global VM pinning bitmap.

Which global VM pinning bitmap are you referring to?

IIRC, when we spawned QEMU we explicitly set it to use all pCPUs, so
that it doesn't inherit whatever affinity libvirtd itself has. I see
we slightly tuned that to only include CPUs that are currently online
(as reported in /sys/devices/system/cpu/online).

Except I see it is even more complicated.  We only use the online
bitmap check if virHostCPUHasBitmap() returns true. We also potentially
asked numad to give us default placement.

So as implemented this _CLEAR flag is still potentially significantly
different to what the original affinity was set to, which I think is
pretty bad semantically.

> As such, the current code does not allow restoring that state, since
> pinning to all pCPUs might be a desired legitimate configuration; I
> removed the hack that deleted the pinning for such a configuration a
> long time ago. This means that an explicit removal of the pinning might
> be a desired behaviour of the API, since abusing any other value to
> restore the default state is not a good idea.

True, but I think it is rather a hack to modify this API with a _CLEAR flag
that essentially means "ignore the bitmap parameter", as opposed to
creating an API virDomainClearCPUAffinity(dom).

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2] vcpupin: add clear feature
Posted by Peter Krempa 6 years, 1 month ago
On Mon, Feb 12, 2018 at 13:42:02 +0000, Daniel Berrange wrote:
> On Mon, Feb 12, 2018 at 02:31:46PM +0100, Peter Krempa wrote:
> > On Mon, Feb 12, 2018 at 09:39:26 +0000, Daniel Berrange wrote:
> > > On Mon, Feb 12, 2018 at 03:54:21AM -0500, Yi Wang wrote:
> > > > We can't clear the vcpupin settings in the XML once the vcpupin
> > > > command has been used, which is inconvenient in some situations,
> > > > such as migration to a host with fewer CPUs.
> > > > 
> > > > This patch introduces a clear feature, which can clear the vcpupin
> > > > setting in the XML using a 'c' option.
> > > > 
> > > > Signed-off-by: Yi Wang <wang.yi59@zte.com.cn>
> > > > Signed-off-by: Xi Xu <xu.xi8@zte.com.cn>
> > > > ---
> > > >  include/libvirt/libvirt-domain.h | 11 +++++++++++
> > > >  src/qemu/qemu_driver.c           | 32 ++++++++++++++++++++++++--------
> > > 
> > > I'm not seeing a good reason for these changes. There's no concept of clearing
> > > CPU pinning - the default is simply "pinned" to all possible CPUs, and the
> > > callers should already be able to request all possible CPUs with the current
> > > API design.
> > 
> > Well, the thing is that in some cases the default is not pinned to all
> > pCPUs, but rather can be taken from other configuration points, e.g.
> > global VM pinning bitmap.
> 
> Which global VM pinning bitmap are you referring to?

<vcpu placement='static' cpuset="1-4,^3,6" current="1">2</vcpu>

The above 'cpuset' is the default pinning for all vcpus, unless an
individual vcpu specifies its own pinning.

> IIRC, when we spawned QEMU we explicitly set it to use all pCPUs, so
> that it doesn't inherit whatever affinity libvirtd itself has. I see
> we slightly tuned that to only include CPUs that are currently online
> (as reported in /sys/devices/system/cpu/online).
> 
> Except I see it is even more complicated.  We only use the online
> bitmap check if virHostCPUHasBitmap() returns true. We also potentially
> asked numad to give us default placement.

Yes, that is the second possible source of pinning information if the
user requests automatic pinning.

> So as implemented this _CLEAR flag is still potentially significantly
> different to what the original affinity was set to, which I think is
> pretty bad semantically.

I did not look at the implementation here, but basically this should
follow the same logic as virDomainDefGetVcpuPinInfoHelper to determine
which bitmap is actually used. The hierarchy is:

1) specific per-vcpu bitmap
2) automatic pinning from numad if enabled
3) <vcpu cpuset=''>
4) all physical CPUs present on the host.

The above algorithm is used in all the pinning code to determine the
effective bitmap.
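
Roughly, as a sketch (simplified; this is not the actual
virDomainDefGetVcpuPinInfoHelper code, and the parameter names are
illustrative):

/* Simplified sketch of the priority order above; inputs that do not
 * apply are assumed to be NULL. */
static virBitmapPtr
effectiveVcpuPinning(virBitmapPtr vcpuCpumask,   /* 1) per-vcpu pinning */
                     virBitmapPtr autoCpuset,    /* 2) numad placement */
                     virBitmapPtr defCpuset,     /* 3) <vcpu cpuset=''> */
                     virBitmapPtr hostCpus)      /* 4) all host pCPUs */
{
    if (vcpuCpumask)
        return vcpuCpumask;
    if (autoCpuset)
        return autoCpuset;
    if (defCpuset)
        return defCpuset;
    return hostCpus;
}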

> 
> > As of such the current code does not allow restoring such state since
> > pinning to all pCPUs might be a desired legitimate configuration so I've
> > removed the hack that deleted the pinning for such configuration a long
> > time ago. This means that an explicit removal of the pinning might be a
> > desired behaviour of the API, since abusing of any other value to
> > restore the default state is not a good idea.
> 
> True, but I think it is rather a hack to modify this API with a _CLEAR flag
> that essentially means "ignore the bitmap parameter", as opposed to
> creating an API virDomainClearCPUAffinity(dom).

Yes, this is probably a better solution. The API needs a 'vcpu' argument
though:

virDomainClearVcpuPin(virDomainPtr dom,
                      unsigned int vcpu,
                      unsigned int flags)
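
A caller would then do something like this (purely hypothetical, since
the API does not exist yet; it only shows the proposed shape):

/* Hypothetical: clear any explicit pinning of vCPU 2, live and config. */
if (virDomainClearVcpuPin(dom, 2,
                          VIR_DOMAIN_AFFECT_LIVE |
                          VIR_DOMAIN_AFFECT_CONFIG) < 0)
    return -1;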

> 
> Regards,
> Daniel
> -- 
> |: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
> |: https://libvirt.org         -o-            https://fstop138.berrange.com :|
> |: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list