This is a preparation patch for further cleanup.
No functional change; just wrap the two major parts of
qemu_savevm_state_complete_precopy() into functions.
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
---
migration/savevm.c | 66 ++++++++++++++++++++++++++++++++++------------
1 file changed, 49 insertions(+), 17 deletions(-)
diff --git a/migration/savevm.c b/migration/savevm.c
index becedcc1c6..c41e13e322 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1246,23 +1246,12 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
qemu_fflush(f);
}
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
- bool inactivate_disks)
+static
+int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy,
+ bool iterable_only)
{
- QJSON *vmdesc;
- int vmdesc_len;
SaveStateEntry *se;
int ret;
- bool in_postcopy = migration_in_postcopy();
- Error *local_err = NULL;
-
- if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
- error_report_err(local_err);
- }
-
- trace_savevm_state_complete_precopy();
-
- cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if (!se->ops ||
@@ -1291,9 +1280,18 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
}
}
- if (iterable_only) {
- goto flush;
- }
+ return 0;
+}
+
+static
+int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
+ bool in_postcopy,
+ bool inactivate_disks)
+{
+ QJSON *vmdesc;
+ int vmdesc_len;
+ SaveStateEntry *se;
+ int ret;
vmdesc = qjson_new();
json_prop_int(vmdesc, "page_size", qemu_target_page_size());
@@ -1353,6 +1351,40 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
}
qjson_destroy(vmdesc);
+ return 0;
+}
+
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
+ bool inactivate_disks)
+{
+ int ret;
+ Error *local_err = NULL;
+ bool in_postcopy = migration_in_postcopy();
+
+ if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
+ error_report_err(local_err);
+ }
+
+ trace_savevm_state_complete_precopy();
+
+ cpu_synchronize_all_states();
+
+ ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy,
+ iterable_only);
+ if (ret) {
+ return ret;
+ }
+
+ if (iterable_only) {
+ goto flush;
+ }
+
+ ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
+ inactivate_disks);
+ if (ret) {
+ return ret;
+ }
+
flush:
qemu_fflush(f);
return 0;
--
2.17.1
* Wei Yang (richardw.yang@linux.intel.com) wrote:
> This is a preparation patch for further cleanup.
>
> No functional change; just wrap the two major parts of
> qemu_savevm_state_complete_precopy() into functions.
>
> Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
> migration/savevm.c | 66 ++++++++++++++++++++++++++++++++++------------
> 1 file changed, 49 insertions(+), 17 deletions(-)
>
> diff --git a/migration/savevm.c b/migration/savevm.c
> index becedcc1c6..c41e13e322 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -1246,23 +1246,12 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
> qemu_fflush(f);
> }
>
> -int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
> - bool inactivate_disks)
> +static
> +int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy,
> + bool iterable_only)
> {
> - QJSON *vmdesc;
> - int vmdesc_len;
> SaveStateEntry *se;
> int ret;
> - bool in_postcopy = migration_in_postcopy();
> - Error *local_err = NULL;
> -
> - if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
> - error_report_err(local_err);
> - }
> -
> - trace_savevm_state_complete_precopy();
> -
> - cpu_synchronize_all_states();
>
> QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
> if (!se->ops ||
> @@ -1291,9 +1280,18 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
> }
> }
>
> - if (iterable_only) {
> - goto flush;
> - }
> + return 0;
> +}
> +
> +static
> +int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
> + bool in_postcopy,
> + bool inactivate_disks)
> +{
> + QJSON *vmdesc;
> + int vmdesc_len;
> + SaveStateEntry *se;
> + int ret;
>
> vmdesc = qjson_new();
> json_prop_int(vmdesc, "page_size", qemu_target_page_size());
> @@ -1353,6 +1351,40 @@ int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
> }
> qjson_destroy(vmdesc);
>
> + return 0;
> +}
> +
> +int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
> + bool inactivate_disks)
> +{
> + int ret;
> + Error *local_err = NULL;
> + bool in_postcopy = migration_in_postcopy();
> +
> + if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
> + error_report_err(local_err);
> + }
> +
> + trace_savevm_state_complete_precopy();
> +
> + cpu_synchronize_all_states();
> +
> + ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy,
> + iterable_only);
> + if (ret) {
> + return ret;
> + }
> +
> + if (iterable_only) {
> + goto flush;
> + }
> +
> + ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
> + inactivate_disks);
> + if (ret) {
> + return ret;
> + }
> +
> flush:
> qemu_fflush(f);
> return 0;
> --
> 2.17.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
© 2016 - 2026 Red Hat, Inc.