[PATCH] Qemu: migration: Not bind RAM info with active migration status

Keqian Zhu posted 1 patch 3 years, 9 months ago
Test syntax-check failed
Patches applied successfully
git fetch https://github.com/patchew-project/libvirt tags/patchew/20200715061801.19192-1-zhukeqian1@huawei.com
src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
1 file changed, 42 insertions(+), 46 deletions(-)
[PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Keqian Zhu 3 years, 9 months ago
Since its commit 65ace0604551 (migration: add postcopy total blocktime into
query-migrate), QEMU may report incoming migration info with an active status
but without RAM info. Drop the check in libvirt that ties RAM info to an
active migration status.

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
 1 file changed, 42 insertions(+), 46 deletions(-)

diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index d808c4b55b..ba8e340742 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
     case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
     case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
         ram = virJSONValueObjectGetObject(ret, "ram");
-        if (!ram) {
-            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                           _("migration was active, but no RAM info was set"));
-            return -1;
-        }
+        if (ram) {
+            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
+                                                 &stats->ram_transferred) < 0) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("migration was active, but RAM 'transferred' "
+                                 "data was missing"));
+                return -1;
+            }
+            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
+                                                 &stats->ram_remaining) < 0) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("migration was active, but RAM 'remaining' "
+                                 "data was missing"));
+                return -1;
+            }
+            if (virJSONValueObjectGetNumberUlong(ram, "total",
+                                                 &stats->ram_total) < 0) {
+                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                               _("migration was active, but RAM 'total' "
+                                 "data was missing"));
+                return -1;
+            }
 
-        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
-                                             &stats->ram_transferred) < 0) {
-            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                           _("migration was active, but RAM 'transferred' "
-                             "data was missing"));
-            return -1;
-        }
-        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
-                                             &stats->ram_remaining) < 0) {
-            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                           _("migration was active, but RAM 'remaining' "
-                             "data was missing"));
-            return -1;
-        }
-        if (virJSONValueObjectGetNumberUlong(ram, "total",
-                                             &stats->ram_total) < 0) {
-            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                           _("migration was active, but RAM 'total' "
-                             "data was missing"));
-            return -1;
-        }
+            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
+                mbps > 0) {
+                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
+                stats->ram_bps = mbps * (1000 * 1000 / 8);
+            }
 
-        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
-            mbps > 0) {
-            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
-            stats->ram_bps = mbps * (1000 * 1000 / 8);
+            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
+                                                 &stats->ram_duplicate) == 0)
+                stats->ram_duplicate_set = true;
+            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
+                                                          &stats->ram_normal));
+            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
+                                                          &stats->ram_normal_bytes));
+            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
+                                                          &stats->ram_dirty_rate));
+            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
+                                                          &stats->ram_page_size));
+            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
+                                                          &stats->ram_iteration));
+            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
+                                                          &stats->ram_postcopy_reqs));
         }
 
-        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
-                                             &stats->ram_duplicate) == 0)
-            stats->ram_duplicate_set = true;
-        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
-                                                      &stats->ram_normal));
-        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
-                                                      &stats->ram_normal_bytes));
-        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
-                                                      &stats->ram_dirty_rate));
-        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
-                                                      &stats->ram_page_size));
-        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
-                                                      &stats->ram_iteration));
-        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
-                                                      &stats->ram_postcopy_reqs));
-
         disk = virJSONValueObjectGetObject(ret, "disk");
         if (disk) {
             rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
-- 
2.19.1
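
For context (not part of the patch, values made up): with the QEMU commit named
above, a query-migrate reply on the incoming (destination) side can carry an
active status without any "ram" member, while the outgoing side keeps returning
the RAM statistics parsed here. Roughly:

  possible incoming-side reply (no "ram" member):

    {"return": {"status": "active"}}

  typical outgoing-side reply (abbreviated):

    {"return": {"status": "active",
                "ram": {"transferred": 104857600,
                        "remaining": 943718400,
                        "total": 1073741824,
                        "mbps": 800.0,
                        "duplicate": 1024,
                        "normal": 25600,
                        "normal-bytes": 104857600,
                        "dirty-pages-rate": 4096,
                        "page-size": 4096,
                        "dirty-sync-count": 2,
                        "postcopy-requests": 0}}}

The field names are exactly the ones read by this function; the precise
contents vary with QEMU version and migration phase.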

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel Henrique Barboza 3 years, 9 months ago

On 7/15/20 3:18 AM, Keqian Zhu wrote:
> For that Qemu supports returning incoming migration info since its commit
> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),


It is worth saying that this QEMU commit that decoupled the RAM
status from the active migration status is live since early 2018:

$ git show 65ace0604551
commit 65ace060455122a461cdc9302238b914084bcd42
Author: Alexey Perevalov <a.perevalov@samsung.com>
Date:   Thu Mar 22 21:17:27 2018 +0300

     migration: add postcopy total blocktime into query-migrate

$ git describe 65ace0604551
v2.12.0-6-g65ace06045


I am not sure if we care about removing a migration failure check for
QEMU 2.12 when we're waiting for 5.1 to come out. My guess is that we
do care, but not enough to demand an "if (QEMU <= 2.12)" in this logic.
I'll also assume that the existing failure check is doing more harm than
good nowadays, so:


Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>


> which may contains active status, but without RAM info. Drop this binding
> relationship check in libvirt.
> 
> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
> ---





>   src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
>   1 file changed, 42 insertions(+), 46 deletions(-)
> 
> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
> index d808c4b55b..ba8e340742 100644
> --- a/src/qemu/qemu_monitor_json.c
> +++ b/src/qemu/qemu_monitor_json.c
> @@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
>       case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
>       case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
>           ram = virJSONValueObjectGetObject(ret, "ram");
> -        if (!ram) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but no RAM info was set"));
> -            return -1;
> -        }
> +        if (ram) {
> +            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
> +                                                 &stats->ram_transferred) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'transferred' "
> +                                 "data was missing"));
> +                return -1;
> +            }
> +            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
> +                                                 &stats->ram_remaining) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'remaining' "
> +                                 "data was missing"));
> +                return -1;
> +            }
> +            if (virJSONValueObjectGetNumberUlong(ram, "total",
> +                                                 &stats->ram_total) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'total' "
> +                                 "data was missing"));
> +                return -1;
> +            }
>   
> -        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
> -                                             &stats->ram_transferred) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'transferred' "
> -                             "data was missing"));
> -            return -1;
> -        }
> -        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
> -                                             &stats->ram_remaining) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'remaining' "
> -                             "data was missing"));
> -            return -1;
> -        }
> -        if (virJSONValueObjectGetNumberUlong(ram, "total",
> -                                             &stats->ram_total) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'total' "
> -                             "data was missing"));
> -            return -1;
> -        }
> +            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
> +                mbps > 0) {
> +                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
> +                stats->ram_bps = mbps * (1000 * 1000 / 8);
> +            }
>   
> -        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
> -            mbps > 0) {
> -            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
> -            stats->ram_bps = mbps * (1000 * 1000 / 8);
> +            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
> +                                                 &stats->ram_duplicate) == 0)
> +                stats->ram_duplicate_set = true;
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
> +                                                          &stats->ram_normal));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
> +                                                          &stats->ram_normal_bytes));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
> +                                                          &stats->ram_dirty_rate));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
> +                                                          &stats->ram_page_size));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
> +                                                          &stats->ram_iteration));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
> +                                                          &stats->ram_postcopy_reqs));
>           }
>   
> -        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
> -                                             &stats->ram_duplicate) == 0)
> -            stats->ram_duplicate_set = true;
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
> -                                                      &stats->ram_normal));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
> -                                                      &stats->ram_normal_bytes));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
> -                                                      &stats->ram_dirty_rate));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
> -                                                      &stats->ram_page_size));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
> -                                                      &stats->ram_iteration));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
> -                                                      &stats->ram_postcopy_reqs));
> -
>           disk = virJSONValueObjectGetObject(ret, "disk");
>           if (disk) {
>               rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
> 

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 9 months ago
Hi Daniel,

On 2020/7/17 22:33, Daniel Henrique Barboza wrote:
> 
> 
> On 7/15/20 3:18 AM, Keqian Zhu wrote:
>> For that Qemu supports returning incoming migration info since its commit
>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
> 
> 
> It is worth saying that this QEMU commit that decoupled the RAM
> status from the active migration status is live since early 2018:
> 
> $ git show 65ace0604551
> commit 65ace060455122a461cdc9302238b914084bcd42
> Author: Alexey Perevalov <a.perevalov@samsung.com>
> Date:   Thu Mar 22 21:17:27 2018 +0300
> 
>     migration: add postcopy total blocktime into query-migrate
> 
> $ git describe 65ace0604551
> v2.12.0-6-g65ace06045
> 
> 
> I am not sure if we care about removing a migration failure check for
> QEMU 2.12 when we're waiting for 5.1 to come out. My guess is that we
> do care, but  not enough to demand a "if (QEMU <= 2.12)" in this logic.
> I'll also assume that the existing failure check is doing more harm than
> good nowadays, so:
> 
Thanks for your review.

Thanks,
Keqian
> 
> Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> 
> 
>> which may contains active status, but without RAM info. Drop this binding
>> relationship check in libvirt.
>>
>> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
>> ---
> 
> 
> 
> 
> 
>>   src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
>>   1 file changed, 42 insertions(+), 46 deletions(-)
>>
>> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
>> index d808c4b55b..ba8e340742 100644
>> --- a/src/qemu/qemu_monitor_json.c
>> +++ b/src/qemu/qemu_monitor_json.c
>> @@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
>>       case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
>>       case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
>>           ram = virJSONValueObjectGetObject(ret, "ram");
>> -        if (!ram) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but no RAM info was set"));
>> -            return -1;
>> -        }
>> +        if (ram) {
>> +            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
>> +                                                 &stats->ram_transferred) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'transferred' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>> +            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
>> +                                                 &stats->ram_remaining) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'remaining' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>> +            if (virJSONValueObjectGetNumberUlong(ram, "total",
>> +                                                 &stats->ram_total) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'total' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>>   
>> -        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
>> -                                             &stats->ram_transferred) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'transferred' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> -        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
>> -                                             &stats->ram_remaining) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'remaining' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> -        if (virJSONValueObjectGetNumberUlong(ram, "total",
>> -                                             &stats->ram_total) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'total' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> +            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
>> +                mbps > 0) {
>> +                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
>> +                stats->ram_bps = mbps * (1000 * 1000 / 8);
>> +            }
>>   
>> -        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
>> -            mbps > 0) {
>> -            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
>> -            stats->ram_bps = mbps * (1000 * 1000 / 8);
>> +            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
>> +                                                 &stats->ram_duplicate) == 0)
>> +                stats->ram_duplicate_set = true;
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
>> +                                                          &stats->ram_normal));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
>> +                                                          &stats->ram_normal_bytes));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
>> +                                                          &stats->ram_dirty_rate));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
>> +                                                          &stats->ram_page_size));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
>> +                                                          &stats->ram_iteration));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
>> +                                                          &stats->ram_postcopy_reqs));
>>           }
>>   
>> -        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
>> -                                             &stats->ram_duplicate) == 0)
>> -            stats->ram_duplicate_set = true;
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
>> -                                                      &stats->ram_normal));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
>> -                                                      &stats->ram_normal_bytes));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
>> -                                                      &stats->ram_dirty_rate));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
>> -                                                      &stats->ram_page_size));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
>> -                                                      &stats->ram_iteration));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
>> -                                                      &stats->ram_postcopy_reqs));
>> -
>>           disk = virJSONValueObjectGetObject(ret, "disk");
>>           if (disk) {
>>               rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
>>
> .
> 

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 9 months ago
Hi Daniel,

On 2020/7/17 22:33, Daniel Henrique Barboza wrote:
> 
> 
> On 7/15/20 3:18 AM, Keqian Zhu wrote:
>> For that Qemu supports returning incoming migration info since its commit
>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
> 
> 
> It is worth saying that this QEMU commit that decoupled the RAM
> status from the active migration status is live since early 2018:
> 
> $ git show 65ace0604551
> commit 65ace060455122a461cdc9302238b914084bcd42
> Author: Alexey Perevalov <a.perevalov@samsung.com>
> Date:   Thu Mar 22 21:17:27 2018 +0300
> 
>     migration: add postcopy total blocktime into query-migrate
> 
> $ git describe 65ace0604551
> v2.12.0-6-g65ace06045
> 
> 
> I am not sure if we care about removing a migration failure check for
> QEMU 2.12 when we're waiting for 5.1 to come out. My guess is that we
> do care, but  not enough to demand a "if (QEMU <= 2.12)" in this logic.
> I'll also assume that the existing failure check is doing more harm than
> good nowadays, so:
> 
> 
> Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
> 
Do you have authority to merge this patch?

Thanks,
Keqian

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel Henrique Barboza 3 years, 9 months ago

On 7/28/20 5:46 AM, zhukeqian wrote:
> Hi Daniel,
> 
> On 2020/7/17 22:33, Daniel Henrique Barboza wrote:
>>
>>
>> On 7/15/20 3:18 AM, Keqian Zhu wrote:
>>> For that Qemu supports returning incoming migration info since its commit
>>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
>>
>>
>> It is worth saying that this QEMU commit that decoupled the RAM
>> status from the active migration status is live since early 2018:
>>
>> $ git show 65ace0604551
>> commit 65ace060455122a461cdc9302238b914084bcd42
>> Author: Alexey Perevalov <a.perevalov@samsung.com>
>> Date:   Thu Mar 22 21:17:27 2018 +0300
>>
>>      migration: add postcopy total blocktime into query-migrate
>>
>> $ git describe 65ace0604551
>> v2.12.0-6-g65ace06045
>>
>>
>> I am not sure if we care about removing a migration failure check for
>> QEMU 2.12 when we're waiting for 5.1 to come out. My guess is that we
>> do care, but  not enough to demand a "if (QEMU <= 2.12)" in this logic.
>> I'll also assume that the existing failure check is doing more harm than
>> good nowadays, so:
>>
>>
>> Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>>
> Do you have authority to merge this patch?

I don't have the authority to merge this in.

We're on code freeze for release 6.6.0 at this moment, meaning that only
urgent fixes will make it upstream during this time frame. I believe a
committer/maintainer will be able to review/merge this patch shortly
after that.


Thanks,


DHB

> 
> Thanks,
> Keqian
> 

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 9 months ago
Hi Daniel,

On 2020/7/28 18:21, Daniel Henrique Barboza wrote:
> 
> 
> On 7/28/20 5:46 AM, zhukeqian wrote:
>> Hi Daniel,
>>
>> On 2020/7/17 22:33, Daniel Henrique Barboza wrote:
>>>
>>>
>>> On 7/15/20 3:18 AM, Keqian Zhu wrote:
>>>> For that Qemu supports returning incoming migration info since its commit
>>>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
>>>
>>>
>>> It is worth saying that this QEMU commit that decoupled the RAM
>>> status from the active migration status is live since early 2018:
>>>
>>> $ git show 65ace0604551
>>> commit 65ace060455122a461cdc9302238b914084bcd42
>>> Author: Alexey Perevalov <a.perevalov@samsung.com>
>>> Date:   Thu Mar 22 21:17:27 2018 +0300
>>>
>>>      migration: add postcopy total blocktime into query-migrate
>>>
>>> $ git describe 65ace0604551
>>> v2.12.0-6-g65ace06045
>>>
>>>
>>> I am not sure if we care about removing a migration failure check for
>>> QEMU 2.12 when we're waiting for 5.1 to come out. My guess is that we
>>> do care, but  not enough to demand a "if (QEMU <= 2.12)" in this logic.
>>> I'll also assume that the existing failure check is doing more harm than
>>> good nowadays, so:
>>>
>>>
>>> Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
>>>
>> Do you have authority to merge this patch?
> 
> I don't have the authority to merge this in.
> 
> We're on code freeze for release 6.6.0 at this moment, meaning that only
> urgent fixes will make it upstream during this time frame. I believe a
> commiter/maintainer will be able to review/merge this patch shortly
> after that.
OK, I see. Thanks :-)
> 
> 
> Thanks,
> 
> 
> DHB
> 
>>
>> Thanks,
>> Keqian
>>
> 

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel P. Berrangé 3 years, 9 months ago
On Wed, Jul 15, 2020 at 02:18:01PM +0800, Keqian Zhu wrote:
> For that Qemu supports returning incoming migration info since its commit
> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
> which may contains active status, but without RAM info. Drop this binding
> relationship check in libvirt.

I'm not clear from this description what the actual bug in libvirt
is ?   What currently fails in libvirt that this patch fixes ?

> 
> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
> ---
>  src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
>  1 file changed, 42 insertions(+), 46 deletions(-)
> 
> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
> index d808c4b55b..ba8e340742 100644
> --- a/src/qemu/qemu_monitor_json.c
> +++ b/src/qemu/qemu_monitor_json.c
> @@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
>      case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
>      case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
>          ram = virJSONValueObjectGetObject(ret, "ram");
> -        if (!ram) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but no RAM info was set"));
> -            return -1;
> -        }
> +        if (ram) {
> +            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
> +                                                 &stats->ram_transferred) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'transferred' "
> +                                 "data was missing"));
> +                return -1;
> +            }
> +            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
> +                                                 &stats->ram_remaining) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'remaining' "
> +                                 "data was missing"));
> +                return -1;
> +            }
> +            if (virJSONValueObjectGetNumberUlong(ram, "total",
> +                                                 &stats->ram_total) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'total' "
> +                                 "data was missing"));
> +                return -1;
> +            }
>  
> -        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
> -                                             &stats->ram_transferred) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'transferred' "
> -                             "data was missing"));
> -            return -1;
> -        }
> -        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
> -                                             &stats->ram_remaining) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'remaining' "
> -                             "data was missing"));
> -            return -1;
> -        }
> -        if (virJSONValueObjectGetNumberUlong(ram, "total",
> -                                             &stats->ram_total) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'total' "
> -                             "data was missing"));
> -            return -1;
> -        }
> +            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
> +                mbps > 0) {
> +                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
> +                stats->ram_bps = mbps * (1000 * 1000 / 8);
> +            }
>  
> -        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
> -            mbps > 0) {
> -            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
> -            stats->ram_bps = mbps * (1000 * 1000 / 8);
> +            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
> +                                                 &stats->ram_duplicate) == 0)
> +                stats->ram_duplicate_set = true;
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
> +                                                          &stats->ram_normal));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
> +                                                          &stats->ram_normal_bytes));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
> +                                                          &stats->ram_dirty_rate));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
> +                                                          &stats->ram_page_size));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
> +                                                          &stats->ram_iteration));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
> +                                                          &stats->ram_postcopy_reqs));
>          }
>  
> -        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
> -                                             &stats->ram_duplicate) == 0)
> -            stats->ram_duplicate_set = true;
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
> -                                                      &stats->ram_normal));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
> -                                                      &stats->ram_normal_bytes));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
> -                                                      &stats->ram_dirty_rate));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
> -                                                      &stats->ram_page_size));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
> -                                                      &stats->ram_iteration));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
> -                                                      &stats->ram_postcopy_reqs));
> -
>          disk = virJSONValueObjectGetObject(ret, "disk");
>          if (disk) {
>              rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
> -- 
> 2.19.1
> 

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 9 months ago

On 2020/7/28 18:32, Daniel P. Berrangé wrote:
> On Wed, Jul 15, 2020 at 02:18:01PM +0800, Keqian Zhu wrote:
>> For that Qemu supports returning incoming migration info since its commit
>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
>> which may contains active status, but without RAM info. Drop this binding
>> relationship check in libvirt.
> 
> I'm not clear from this description what the actual bug in libvirt
> is ?   What currently fails in libvirt that this patch fixes ?
> 
When querying the migration status, libvirt assumes that if the migration status
is active, RAM info must also be set. However, QEMU has changed this assumption,
so libvirt reports "migration was active, but no RAM info was set" when querying
migration info.

Thanks,
Keqian
>>
>> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
>> ---
>>  src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
>>  1 file changed, 42 insertions(+), 46 deletions(-)
>>
>> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
>> index d808c4b55b..ba8e340742 100644
>> --- a/src/qemu/qemu_monitor_json.c
>> +++ b/src/qemu/qemu_monitor_json.c
>> @@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
>>      case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
>>      case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
>>          ram = virJSONValueObjectGetObject(ret, "ram");
>> -        if (!ram) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but no RAM info was set"));
>> -            return -1;
>> -        }
>> +        if (ram) {
>> +            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
>> +                                                 &stats->ram_transferred) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'transferred' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>> +            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
>> +                                                 &stats->ram_remaining) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'remaining' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>> +            if (virJSONValueObjectGetNumberUlong(ram, "total",
>> +                                                 &stats->ram_total) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'total' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>>  
>> -        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
>> -                                             &stats->ram_transferred) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'transferred' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> -        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
>> -                                             &stats->ram_remaining) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'remaining' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> -        if (virJSONValueObjectGetNumberUlong(ram, "total",
>> -                                             &stats->ram_total) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'total' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> +            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
>> +                mbps > 0) {
>> +                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
>> +                stats->ram_bps = mbps * (1000 * 1000 / 8);
>> +            }
>>  
>> -        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
>> -            mbps > 0) {
>> -            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
>> -            stats->ram_bps = mbps * (1000 * 1000 / 8);
>> +            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
>> +                                                 &stats->ram_duplicate) == 0)
>> +                stats->ram_duplicate_set = true;
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
>> +                                                          &stats->ram_normal));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
>> +                                                          &stats->ram_normal_bytes));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
>> +                                                          &stats->ram_dirty_rate));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
>> +                                                          &stats->ram_page_size));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
>> +                                                          &stats->ram_iteration));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
>> +                                                          &stats->ram_postcopy_reqs));
>>          }
>>  
>> -        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
>> -                                             &stats->ram_duplicate) == 0)
>> -            stats->ram_duplicate_set = true;
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
>> -                                                      &stats->ram_normal));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
>> -                                                      &stats->ram_normal_bytes));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
>> -                                                      &stats->ram_dirty_rate));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
>> -                                                      &stats->ram_page_size));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
>> -                                                      &stats->ram_iteration));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
>> -                                                      &stats->ram_postcopy_reqs));
>> -
>>          disk = virJSONValueObjectGetObject(ret, "disk");
>>          if (disk) {
>>              rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
>> -- 
>> 2.19.1
>>
> 
> Regards,
> Daniel
> 


Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel P. Berrangé 3 years, 9 months ago
On Tue, Jul 28, 2020 at 07:35:11PM +0800, zhukeqian wrote:
> 
> 
> On 2020/7/28 18:32, Daniel P. Berrangé wrote:
> > On Wed, Jul 15, 2020 at 02:18:01PM +0800, Keqian Zhu wrote:
> >> For that Qemu supports returning incoming migration info since its commit
> >> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
> >> which may contains active status, but without RAM info. Drop this binding
> >> relationship check in libvirt.
> > 
> > I'm not clear from this description what the actual bug in libvirt
> > is ?   What currently fails in libvirt that this patch fixes ?
> > 
> When query migration status, libvirt assumes that when migration status is active,
> then RAM info must be set. However qemu has changed this assumption, so libvirt will
> report "migration was active, but no RAM info was set" when query migration info.

So you're trying to make it possible to query migration stats on the
destination host ?  IOW, this is a new feature, rather than a bug
fix ?

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Re: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 9 months ago

On 2020/7/28 19:44, Daniel P. Berrangé wrote:
> On Tue, Jul 28, 2020 at 07:35:11PM +0800, zhukeqian wrote:
>>
>>
>> On 2020/7/28 18:32, Daniel P. Berrangé wrote:
>>> On Wed, Jul 15, 2020 at 02:18:01PM +0800, Keqian Zhu wrote:
>>>> For that Qemu supports returning incoming migration info since its commit
>>>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
>>>> which may contains active status, but without RAM info. Drop this binding
>>>> relationship check in libvirt.
>>>
>>> I'm not clear from this description what the actual bug in libvirt
>>> is ?   What currently fails in libvirt that this patch fixes ?
>>>
>> When query migration status, libvirt assumes that when migration status is active,
>> then RAM info must be set. However qemu has changed this assumption, so libvirt will
>> report "migration was active, but no RAM info was set" when query migration info.
> 
> So you're trying to make it possible to query migration stats on the
> destination host ?  IOW, this is a new feature, rather than a bug
> fix ?
Yes. Actually I did not explicitly describe it as a bug fix in the commit message.

Thanks,
Keqian
> 
> Regards,
> Daniel
> 


Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 4 months ago
Hi folks,

Kindly ping. I found that this patch has not been applied yet.
It has been reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.

Cheers,
Keqian

On 2020/7/15 14:18, Keqian Zhu wrote:
> For that Qemu supports returning incoming migration info since its commit
> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
> which may contains active status, but without RAM info. Drop this binding
> relationship check in libvirt.
> 
> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
> ---
>  src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
>  1 file changed, 42 insertions(+), 46 deletions(-)
> 
> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
> index d808c4b55b..ba8e340742 100644
> --- a/src/qemu/qemu_monitor_json.c
> +++ b/src/qemu/qemu_monitor_json.c
> @@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
>      case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
>      case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
>          ram = virJSONValueObjectGetObject(ret, "ram");
> -        if (!ram) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but no RAM info was set"));
> -            return -1;
> -        }
> +        if (ram) {
> +            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
> +                                                 &stats->ram_transferred) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'transferred' "
> +                                 "data was missing"));
> +                return -1;
> +            }
> +            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
> +                                                 &stats->ram_remaining) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'remaining' "
> +                                 "data was missing"));
> +                return -1;
> +            }
> +            if (virJSONValueObjectGetNumberUlong(ram, "total",
> +                                                 &stats->ram_total) < 0) {
> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> +                               _("migration was active, but RAM 'total' "
> +                                 "data was missing"));
> +                return -1;
> +            }
>  
> -        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
> -                                             &stats->ram_transferred) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'transferred' "
> -                             "data was missing"));
> -            return -1;
> -        }
> -        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
> -                                             &stats->ram_remaining) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'remaining' "
> -                             "data was missing"));
> -            return -1;
> -        }
> -        if (virJSONValueObjectGetNumberUlong(ram, "total",
> -                                             &stats->ram_total) < 0) {
> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> -                           _("migration was active, but RAM 'total' "
> -                             "data was missing"));
> -            return -1;
> -        }
> +            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
> +                mbps > 0) {
> +                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
> +                stats->ram_bps = mbps * (1000 * 1000 / 8);
> +            }
>  
> -        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
> -            mbps > 0) {
> -            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
> -            stats->ram_bps = mbps * (1000 * 1000 / 8);
> +            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
> +                                                 &stats->ram_duplicate) == 0)
> +                stats->ram_duplicate_set = true;
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
> +                                                          &stats->ram_normal));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
> +                                                          &stats->ram_normal_bytes));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
> +                                                          &stats->ram_dirty_rate));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
> +                                                          &stats->ram_page_size));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
> +                                                          &stats->ram_iteration));
> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
> +                                                          &stats->ram_postcopy_reqs));
>          }
>  
> -        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
> -                                             &stats->ram_duplicate) == 0)
> -            stats->ram_duplicate_set = true;
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
> -                                                      &stats->ram_normal));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
> -                                                      &stats->ram_normal_bytes));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
> -                                                      &stats->ram_dirty_rate));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
> -                                                      &stats->ram_page_size));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
> -                                                      &stats->ram_iteration));
> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
> -                                                      &stats->ram_postcopy_reqs));
> -
>          disk = virJSONValueObjectGetObject(ret, "disk");
>          if (disk) {
>              rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
> 


Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel Henrique Barboza 3 years, 4 months ago

On 12/4/20 5:12 AM, zhukeqian wrote:
> Hi folks,
> 
> Kindly ping. I found that this patch is not applied.
> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.


It has my ACK, but looking into the messages I see that Daniel was
inquiring about this being a bug fix or an enhancement (he didn't
provide an ACK). Not sure if he wants some changes in the commit
message or if he has any other reservations about it.




Thanks,


DHB


> 
> Cheers,
> Keqian
> 
> On 2020/7/15 14:18, Keqian Zhu wrote:
>> For that Qemu supports returning incoming migration info since its commit
>> 65ace0604551 (migration: add postcopy total blocktime into query-migrate),
>> which may contains active status, but without RAM info. Drop this binding
>> relationship check in libvirt.
>>
>> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
>> ---
>>   src/qemu/qemu_monitor_json.c | 88 +++++++++++++++++-------------------
>>   1 file changed, 42 insertions(+), 46 deletions(-)
>>
>> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
>> index d808c4b55b..ba8e340742 100644
>> --- a/src/qemu/qemu_monitor_json.c
>> +++ b/src/qemu/qemu_monitor_json.c
>> @@ -3547,56 +3547,52 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
>>       case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
>>       case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
>>           ram = virJSONValueObjectGetObject(ret, "ram");
>> -        if (!ram) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but no RAM info was set"));
>> -            return -1;
>> -        }
>> +        if (ram) {
>> +            if (virJSONValueObjectGetNumberUlong(ram, "transferred",
>> +                                                 &stats->ram_transferred) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'transferred' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>> +            if (virJSONValueObjectGetNumberUlong(ram, "remaining",
>> +                                                 &stats->ram_remaining) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'remaining' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>> +            if (virJSONValueObjectGetNumberUlong(ram, "total",
>> +                                                 &stats->ram_total) < 0) {
>> +                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> +                               _("migration was active, but RAM 'total' "
>> +                                 "data was missing"));
>> +                return -1;
>> +            }
>>   
>> -        if (virJSONValueObjectGetNumberUlong(ram, "transferred",
>> -                                             &stats->ram_transferred) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'transferred' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> -        if (virJSONValueObjectGetNumberUlong(ram, "remaining",
>> -                                             &stats->ram_remaining) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'remaining' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> -        if (virJSONValueObjectGetNumberUlong(ram, "total",
>> -                                             &stats->ram_total) < 0) {
>> -            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>> -                           _("migration was active, but RAM 'total' "
>> -                             "data was missing"));
>> -            return -1;
>> -        }
>> +            if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
>> +                mbps > 0) {
>> +                /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
>> +                stats->ram_bps = mbps * (1000 * 1000 / 8);
>> +            }
>>   
>> -        if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 &&
>> -            mbps > 0) {
>> -            /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */
>> -            stats->ram_bps = mbps * (1000 * 1000 / 8);
>> +            if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
>> +                                                 &stats->ram_duplicate) == 0)
>> +                stats->ram_duplicate_set = true;
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
>> +                                                          &stats->ram_normal));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
>> +                                                          &stats->ram_normal_bytes));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
>> +                                                          &stats->ram_dirty_rate));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
>> +                                                          &stats->ram_page_size));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
>> +                                                          &stats->ram_iteration));
>> +            ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
>> +                                                          &stats->ram_postcopy_reqs));
>>           }
>>   
>> -        if (virJSONValueObjectGetNumberUlong(ram, "duplicate",
>> -                                             &stats->ram_duplicate) == 0)
>> -            stats->ram_duplicate_set = true;
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal",
>> -                                                      &stats->ram_normal));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
>> -                                                      &stats->ram_normal_bytes));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
>> -                                                      &stats->ram_dirty_rate));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "page-size",
>> -                                                      &stats->ram_page_size));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
>> -                                                      &stats->ram_iteration));
>> -        ignore_value(virJSONValueObjectGetNumberUlong(ram, "postcopy-requests",
>> -                                                      &stats->ram_postcopy_reqs));
>> -
>>           disk = virJSONValueObjectGetObject(ret, "disk");
>>           if (disk) {
>>               rc = virJSONValueObjectGetNumberUlong(disk, "transferred",
>>

Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 4 months ago
Hi Daniel,

On 2020/12/4 22:42, Daniel Henrique Barboza wrote:
> 
> 
> On 12/4/20 5:12 AM, zhukeqian wrote:
>> Hi folks,
>>
>> Kindly ping. I found that this patch is not applied.
>> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.
> 
> 
> It has my ACK, but looking into the messages I see that Daniel was
> inquiring about this being a bug fix or an enhancement (he didn't
> provide an ACK). Not sure if he wants some changes in the commit
> message or if he has any other reservations about it.
> 
I see, thanks. I will ask for his thoughts.

Cheers,
Keqian
> 
> 
> 
> Thanks,
> 
> 
> DHB
> 
> 
>>
>> Cheers,
>> Keqian
>>
>> On 2020/7/15 14:18, Keqian Zhu wrote:
>>> [...]
> 
> .
> 


Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel P. Berrangé 3 years, 4 months ago
On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
> Hi Daniel,
> 
> On 2020/12/4 22:42, Daniel Henrique Barboza wrote:
> > 
> > 
> > On 12/4/20 5:12 AM, zhukeqian wrote:
> >> Hi folks,
> >>
> >> Kindly ping. I found that this patch is not applied.
> >> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.
> > 
> > 
> > It has my ACK, but looking into the messages I see that Daniel was
> > inquiring about this being a bug fix or an enhancement (he didn't
> > provide an ACK). Not sure if he wants some changes in the commit
> > message or if he has any other reservations about it.
> > 
> I see, thanks. I will ask for his thoughts.

Yes, it wasn't clear what this actually changed from libvirt's POV.

What API call or usage scenario is currently broken, that this fixes ? 


Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 4 months ago
On 2020/12/7 18:38, Daniel P. Berrangé wrote:
> On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
>> Hi Daniel,
>>
>> On 2020/12/4 22:42, Daniel Henrique Barboza wrote:
>>>
>>>
>>> On 12/4/20 5:12 AM, zhukeqian wrote:
>>>> Hi folks,
>>>>
>>>> Kindly ping. I found that this patch is not applied.
>>>> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.
>>>
>>>
>>> It has my ACK, but looking into the messages I see that Daniel was
>>> inquiring about this being a bug fix or an enhancement (he didn't
>>> provide an ACK). Not sure if he wants some changes in the commit
>>> message or if he has any other reservations about it.
>>>
>> I see, thanks. I will ask for his thoughts.
> 
> Yes, it wasn't clear what this actually changed from libvirt's POV.
> 
> What API call or usage scenario is currently broken, that this fixes ? 
> 

Hi Daniel,

The purpose is to remove this failure check for QEMU v2.12.
In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.

The usage scenario is querying migration status at destination side, which may contain
active migration status, but without RAM status, so we will see that libvirt report error here.
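
Just to illustrate the shape of the reply (simplified, not taken from a real run), the
destination can answer query-migrate with something like

    {"return": {"status": "completed", "postcopy-blocktime": 0, "total-time": 2341}}

i.e. a status such as "completed" but with no "ram" member, which is exactly what the
old check rejects.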

The downside is that we lose this failure check for QEMU < v2.12.
However, adding a QEMU version check here would not be graceful.

Can you accept this trade-off, or do you have better thoughts?

Thanks,
Keqian


> 
> Regards,
> Daniel
> 


Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel P. Berrangé 3 years, 4 months ago
On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
> 
> On 2020/12/7 18:38, Daniel P. Berrangé wrote:
> > On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
> >> Hi Daniel,
> >>
> >> On 2020/12/4 22:42, Daniel Henrique Barboza wrote:
> >>>
> >>>
> >>> On 12/4/20 5:12 AM, zhukeqian wrote:
> >>>> Hi folks,
> >>>>
> >>>> Kindly ping. I found that this patch is not applied.
> >>>> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.
> >>>
> >>>
> >>> It has my ACK, but looking into the messages I see that Daniel was
> >>> inquiring about this being a bug fix or an enhancement (he didn't
> >>> provide an ACK). Not sure if he wants some changes in the commit
> >>> message or if he has any other reservations about it.
> >>>
> >> I see, thanks. I will ask for his thoughts.
> > 
> > Yes, it wasn't clear what this actually changed from libvirt's POV.
> > 
> > What API call or usage scenario is currently broken, that this fixes ? 
> > 
> 
> Hi Daniel,
> 
> The purpose is to remove this failure check for QEMU v2.12.
> In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
> 
> The usage scenario is querying migration status at destination side, which may contain
> active migration status, but without RAM status, so we will see that libvirt report error here.

I'm confused, because AFAIK, libvirt does not need to run
query-migrate on the destination, so there shouldn't be anything
that needs fixing.

So can you explain what scenario with libvirt you are seeing an
error in, and exactly how this fixes it.


Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Jiri Denemark 3 years, 4 months ago
On Tue, Dec 08, 2020 at 09:27:39 +0000, Daniel P. Berrangé wrote:
> On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
> > 
> > On 2020/12/7 18:38, Daniel P. Berrangé wrote:
> > > On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
> > >> Hi Daniel,
> > >>
> > >> On 2020/12/4 22:42, Daniel Henrique Barboza wrote:
> > >>>
> > >>>
> > >>> On 12/4/20 5:12 AM, zhukeqian wrote:
> > >>>> Hi folks,
> > >>>>
> > >>>> Kindly ping. I found that this patch is not applied.
> > >>>> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.
> > >>>
> > >>>
> > >>> It has my ACK, but looking into the messages I see that Daniel was
> > >>> inquiring about this being a bug fix or an enhancement (he didn't
> > >>> provide an ACK). Not sure if he wants some changes in the commit
> > >>> message or if he has any other reservations about it.
> > >>>
> > >> I see, thanks. I will ask for his thoughts.
> > > 
> > > Yes, it wasn't clear what this actually changed from libvirt's POV.
> > > 
> > > What API call or usage scenario is currently broken, that this fixes ? 
> > > 
> > 
> > Hi Daniel,
> > 
> > The purpose is to remove this failure check for QEMU v2.12.
> > In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
> > 
> > The usage scenario is querying migration status at destination side, which may contain
> > active migration status, but without RAM status, so we will see that libvirt report error here.
> 
> I'm confused, because AFAIK, libvirt does not need to run
> query-migrate on the destination, so there shouldn't be anything
> that needs fixing.

Moreover, you can't even request migration statistics on the destination
manually because libvirt blocks that:

# virsh domjobinfo nest
error: Operation not supported: migration statistics are available only
on the source host

Jirka

Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Keqian Zhu 3 years, 4 months ago
Hi Daniel and Jiri,

On 2020/12/8 18:31, Jiri Denemark wrote:
> On Tue, Dec 08, 2020 at 09:27:39 +0000, Daniel P. Berrangé wrote:
>> On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
>>>
>>> On 2020/12/7 18:38, Daniel P. Berrangé wrote:
>>>> On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
>>>>> Hi Daniel,
[...]

>>>
>>> Hi Daniel,
>>>
>>> The purpose is to remove this failure check for QEMU v2.12.
>>> In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
>>>
>>> The usage scenario is querying migration status at destination side, which may contain
>>> active migration status, but without RAM status, so we will see that libvirt report error here.
>>
>> I'm confused, because AFAIK, libvirt does not need to run
>> query-migrate on the destination, so there shouldn't be anything
>> that needs fixing.
> 
> Moreover, you can't even request migration statistics on the destination
> manually because libvirt blocks that:
> 
> # virsh domjobinfo nest
> error: Operation not supported: migration statistics are available only
> on the source host
> 
> Jirka
> 
> .
> 
Sorry for delay reply.

The purpose of QEMU commit 65ace0604551 (migration: add postcopy total blocktime into query-migrate)
is to query some postcopy related information on destination side.

We can call query-migrate on destination side *after* migration complete, thanks.

Keqian.


Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel P. Berrangé 3 years, 3 months ago
On Fri, Dec 18, 2020 at 04:38:22PM +0800, Keqian Zhu wrote:
> Hi Daniel and Jiri,
> 
> On 2020/12/8 18:31, Jiri Denemark wrote:
> > On Tue, Dec 08, 2020 at 09:27:39 +0000, Daniel P. Berrangé wrote:
> >> On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
> >>>
> >>> On 2020/12/7 18:38, Daniel P. Berrangé wrote:
> >>>> On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
> >>>>> Hi Daniel,
> [...]
> 
> >>>
> >>> Hi Daniel,
> >>>
> >>> The purpose is to remove this failure check for QEMU v2.12.
> >>> In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
> >>>
> >>> The usage scenario is querying migration status at destination side, which may contain
> >>> active migration status, but without RAM status, so we will see that libvirt report error here.
> >>
> >> I'm confused, because AFAIK, libvirt does not need to run
> >> query-migrate on the destination, so there shouldn't be anything
> >> that needs fixing.
> > 
> > Moreover, you can't even request migration statistics on the destination
> > manually because libvirt blocks that:
> > 
> > # virsh domjobinfo nest
> > error: Operation not supported: migration statistics are available only
> > on the source host
> > 
> > Jirka
> > 
> > .
> > 
> Sorry for delay reply.
> 
> The purpose of QEMU commit 65ace0604551 (migration: add postcopy total blocktime into query-migrate)
> is to query some postcopy related information on destination side.
> 
> We can call query-migrate on destination side *after* migration complete, thanks.

But nothing in libvirt ever tries to call query-migrate on the dest
side. 

Do you have more patches that add such calls ? If so, then please send a
patch series that does the full job.

Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Keqian Zhu 3 years, 3 months ago
Hi Daniel,

Thanks for your reply :-) Please see my words below.

On 2021/1/4 19:58, Daniel P. Berrangé wrote:
> On Fri, Dec 18, 2020 at 04:38:22PM +0800, Keqian Zhu wrote:
>> Hi Daniel and Jiri,
>>
>> On 2020/12/8 18:31, Jiri Denemark wrote:
>>> On Tue, Dec 08, 2020 at 09:27:39 +0000, Daniel P. Berrangé wrote:
>>>> On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
>>>>>
>>>>> On 2020/12/7 18:38, Daniel P. Berrangé wrote:
>>>>>> On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
>>>>>>> Hi Daniel,
>> [...]
>>
>>>>>
>>>>> Hi Daniel,
>>>>>
>>>>> The purpose is to remove this failure check for QEMU v2.12.
>>>>> In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
>>>>>
>>>>> The usage scenario is querying migration status at destination side, which may contain
>>>>> active migration status, but without RAM status, so we will see that libvirt report error here.
>>>>
>>>> I'm confused, because AFAIK, libvirt does not need to run
>>>> query-migrate on the destination, so there shouldn't be anything
>>>> that needs fixing.
>>>
>>> Moreover, you can't even request migration statistics on the destination
>>> manually because libvirt blocks that:
>>>
>>> # virsh domjobinfo nest
>>> error: Operation not supported: migration statistics are available only
>>> on the source host
>>>
>>> Jirka
>>>
>>> .
>>>
>> Sorry for delay reply.
>>
>> The purpose of QEMU commit 65ace0604551 (migration: add postcopy total blocktime into query-migrate)
>> is to query some postcopy related information on destination side.
>>
>> We can call query-migrate on destination side *after* migration complete, thanks.
> 
> But nothing in libvirt ever tries to call query-migrate on the dest
> side. 
Yes, but the dest side does not always act as dest. After migration completion, the dest side enters
to a normal status and libvirt does not forbid us to query migration status.

Before QEMU commit 65ace0604551, we can successfully query the migration status, which is
MIGRATION_STATUS_NONE. But this commit will return valid status (MIGRATION_STATUS_COMPLETED)
without ram info, causing libvirt reports error (migration was active, but no RAM info was set).
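
Roughly, as an illustration (not exact output):

    before 65ace0604551, query-migrate on the dest after completion:
        {"execute": "query-migrate"}  ->  {"return": {}}
    after 65ace0604551:
        {"execute": "query-migrate"}  ->  {"return": {"status": "completed", ...}}

The second reply carries a status but no "ram" member, so the check in
qemuMonitorJSONGetMigrationStatsReply() reports the error above.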

> 
> Do you have more patches that add such calls ? If so, then please send a
> patch series that does the full job.
I do not add new feature to libvirt, but just manually execute query-migrate on dest side *after
migration completion* will trigger this problem.

Thanks,
Keqian


Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Daniel P. Berrangé 3 years, 3 months ago
On Tue, Jan 05, 2021 at 09:28:27PM +0800, Keqian Zhu wrote:
> Hi Daniel,
> 
> Thanks for your reply :-) Please see my words below.
> 
> On 2021/1/4 19:58, Daniel P. Berrangé wrote:
> > On Fri, Dec 18, 2020 at 04:38:22PM +0800, Keqian Zhu wrote:
> >> Hi Daniel and Jiri,
> >>
> >> On 2020/12/8 18:31, Jiri Denemark wrote:
> >>> On Tue, Dec 08, 2020 at 09:27:39 +0000, Daniel P. Berrangé wrote:
> >>>> On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
> >>>>>
> >>>>> On 2020/12/7 18:38, Daniel P. Berrangé wrote:
> >>>>>> On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
> >>>>>>> Hi Daniel,
> >> [...]
> >>
> >>>>>
> >>>>> Hi Daniel,
> >>>>>
> >>>>> The purpose is to remove this failure check for QEMU v2.12.
> >>>>> In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
> >>>>>
> >>>>> The usage scenario is querying migration status at destination side, which may contain
> >>>>> active migration status, but without RAM status, so we will see that libvirt report error here.
> >>>>
> >>>> I'm confused, because AFAIK, libvirt does not need to run
> >>>> query-migrate on the destination, so there shouldn't be anything
> >>>> that needs fixing.
> >>>
> >>> Moreover, you can't even request migration statistics on the destination
> >>> manually because libvirt blocks that:
> >>>
> >>> # virsh domjobinfo nest
> >>> error: Operation not supported: migration statistics are available only
> >>> on the source host
> >>>
> >>> Jirka
> >>>
> >>> .
> >>>
> >> Sorry for delay reply.
> >>
> >> The purpose of QEMU commit 65ace0604551 (migration: add postcopy total blocktime into query-migrate)
> >> is to query some postcopy related information on destination side.
> >>
> >> We can call query-migrate on destination side *after* migration complete, thanks.
> > 
> > But nothing in libvirt ever tries to call query-migrate on the dest
> > side. 
> Yes, but the dest side does not always act as dest. After migration completion, the dest side enters
> to a normal status and libvirt does not forbid us to query migration status.
> 
> Before QEMU commit 65ace0604551, we can successfully query the migration status, which is
> MIGRATION_STATUS_NONE. But this commit will return valid status (MIGRATION_STATUS_COMPLETED)
> without ram info, causing libvirt reports error (migration was active, but no RAM info was set).
> 
> > 
> > Do you have more patches that add such calls ? If so, then please send a
> > patch series that does the full job.
> I do not add new feature to libvirt, but just manually execute query-migrate on dest side *after
> migration completion* will trigger this problem.

How are you running query-migrate ? If you're doing that with monitor
command passthrough then we shouldn't need to change the libvirt migration
code.

Please can you provide the exact sequence of libvirt API calls / virsh
commands you are running on both source and dest  to reproduce the
problem.


Regards,
Daniel
-- 
|: https://berrange.com      -o-    https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org         -o-            https://fstop138.berrange.com :|
|: https://entangle-photo.org    -o-    https://www.instagram.com/dberrange :|

Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by Keqian Zhu 3 years, 3 months ago

On 2021/1/5 21:34, Daniel P. Berrangé wrote:
> On Tue, Jan 05, 2021 at 09:28:27PM +0800, Keqian Zhu wrote:
>> Hi Daniel,
>>
>> Thanks for your reply :-) Please see my words below.
>>
>> On 2021/1/4 19:58, Daniel P. Berrangé wrote:
>>> On Fri, Dec 18, 2020 at 04:38:22PM +0800, Keqian Zhu wrote:
>>>> Hi Daniel and Jiri,
>>>>
>>>> On 2020/12/8 18:31, Jiri Denemark wrote:
>>>>> On Tue, Dec 08, 2020 at 09:27:39 +0000, Daniel P. Berrangé wrote:
>>>>>> On Tue, Dec 08, 2020 at 10:06:25AM +0800, zhukeqian wrote:
>>>>>>>
>>>>>>> On 2020/12/7 18:38, Daniel P. Berrangé wrote:
>>>>>>>> On Mon, Dec 07, 2020 at 09:55:53AM +0800, zhukeqian wrote:
>>>>>>>>> Hi Daniel,
>>>> [...]
>>>>
>>>>>>>
>>>>>>> Hi Daniel,
>>>>>>>
>>>>>>> The purpose is to remove this failure check for QEMU v2.12.
>>>>>>> In QEMU commit 65ace0604551, it decoupled the RAM status from the active migration status.
>>>>>>>
>>>>>>> The usage scenario is querying migration status at destination side, which may contain
>>>>>>> active migration status, but without RAM status, so we will see that libvirt report error here.
>>>>>>
>>>>>> I'm confused, because AFAIK, libvirt does not need to run
>>>>>> query-migrate on the destination, so there shouldn't be anything
>>>>>> that needs fixing.
>>>>>
>>>>> Moreover, you can't even request migration statistics on the destination
>>>>> manually because libvirt blocks that:
>>>>>
>>>>> # virsh domjobinfo nest
>>>>> error: Operation not supported: migration statistics are available only
>>>>> on the source host
>>>>>
>>>>> Jirka
>>>>>
>>>>> .
>>>>>
>>>> Sorry for delay reply.
>>>>
>>>> The purpose of QEMU commit 65ace0604551 (migration: add postcopy total blocktime into query-migrate)
>>>> is to query some postcopy related information on destination side.
>>>>
>>>> We can call query-migrate on destination side *after* migration complete, thanks.
>>>
>>> But nothing in libvirt ever tries to call query-migrate on the dest
>>> side. 
>> Yes, but the dest side does not always act as dest. After migration completion, the dest side enters
>> to a normal status and libvirt does not forbid us to query migration status.
>>
>> Before QEMU commit 65ace0604551, we can successfully query the migration status, which is
>> MIGRATION_STATUS_NONE. But this commit will return valid status (MIGRATION_STATUS_COMPLETED)
>> without ram info, causing libvirt reports error (migration was active, but no RAM info was set).
>>
>>>
>>> Do you have more patches that add such calls ? If so, then please send a
>>> patch series that does the full job.
>> I do not add new feature to libvirt, but just manually execute query-migrate on dest side *after
>> migration completion* will trigger this problem.
> 
> How are you running query-migrate ? If you're doing that with monitor
> command passthrough then we shouldn't need to change the libvirt migration
> code.
OK. I am not very familiar with the libvirt code logic; my workflow is:

     source side                                dest side

 virsh start domain_name                            |
      |                                             |
 virsh migrate --live domain_name       (wait for migrate complete)
      |                                             |
  (migrate complete)                                |
                                        virsh domjobinfo domain_name
                                                    |
                                 (migration was active, but no RAM info was set)
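
In concrete commands (domain name and destination URI are made up, output shown roughly):

    src# virsh start domain_name
    src# virsh migrate --live domain_name qemu+ssh://dest/system
    dst# virsh domjobinfo domain_name
    error: internal error: migration was active, but no RAM info was set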

Hope the above workflow helps.

Thanks,
Keqian

> 
> Please can you provide the exact sequence of libvirt API calls / virsh
> commands you are running on both source and dest  to reproduce the
> problem.
> 
> 
> Regards,
> Daniel
> 


Re: Ping: [PATCH] Qemu: migration: Not bind RAM info with active migration status
Posted by zhukeqian 3 years, 4 months ago
Hi Daniel,

This patch has received an ACK from Daniel Henrique Barboza and is waiting for
your ACK. Do you have any additional suggestions on it?

Thanks,
Keqian

On 2020/12/4 16:12, zhukeqian wrote:
> Hi folks,
> 
> Kindly ping. I found that this patch is not applied.
> Has reviewed by Daniel Henrique Barboza and Daniel P. Berrangé.
> 
> Cheers,
> Keqian
> 
> On 2020/7/15 14:18, Keqian Zhu wrote:
>> [...]