[Qemu-devel] [PATCH v5 15/17] migration: Test new fd infrastructure

Juan Quintela posted 17 patches 8 years, 6 months ago
There is a newer version of this series
[Qemu-devel] [PATCH v5 15/17] migration: Test new fd infrastructure
Posted by Juan Quintela 8 years, 6 months ago
We just send the page address through the alternate channels and verify
that it is correct.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/ram.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/migration/ram.c b/migration/ram.c
index 49c4880..b55b243 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -468,8 +468,26 @@ static void *multifd_send_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
             qemu_mutex_unlock(&p->mutex);
+
+            for (i = 0; i < num; i++) {
+                if (qio_channel_write(p->c,
+                                      (const char *)&p->pages.iov[i].iov_base,
+                                      sizeof(uint8_t *), &error_abort)
+                    != sizeof(uint8_t *)) {
+                    MigrationState *s = migrate_get_current();
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_send_threads();
+                    return NULL;
+                }
+            }
             qemu_mutex_lock(&multifd_send_state->mutex);
             p->done = true;
             qemu_mutex_unlock(&multifd_send_state->mutex);
@@ -636,6 +654,7 @@ void multifd_load_cleanup(void)
 static void *multifd_recv_thread(void *opaque)
 {
     MultiFDRecvParams *p = opaque;
+    uint8_t *recv_address;
 
     qemu_sem_post(&p->ready);
     while (true) {
@@ -645,7 +664,38 @@ static void *multifd_recv_thread(void *opaque)
             break;
         }
         if (p->pages.num) {
+            int i;
+            int num;
+
+            num = p->pages.num;
             p->pages.num = 0;
+
+            for (i = 0; i < num; i++) {
+                if (qio_channel_read(p->c,
+                                     (char *)&recv_address,
+                                     sizeof(uint8_t *), &error_abort)
+                    != sizeof(uint8_t *)) {
+                    MigrationState *s = migrate_get_current();
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_recv_threads();
+                    return NULL;
+                }
+                if (recv_address != p->pages.iov[i].iov_base) {
+                    MigrationState *s = migrate_get_current();
+
+                    printf("We received %p what we were expecting %p (%d)\n",
+                           recv_address,
+                           p->pages.iov[i].iov_base, i);
+
+                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
+                                      MIGRATION_STATUS_FAILED);
+                    terminate_multifd_recv_threads();
+                    return NULL;
+                }
+            }
+
             p->done = true;
             qemu_mutex_unlock(&p->mutex);
             qemu_sem_post(&p->ready);
-- 
2.9.4


Re: [Qemu-devel] [PATCH v5 15/17] migration: Test new fd infrastructure
Posted by Dr. David Alan Gilbert 8 years, 6 months ago
* Juan Quintela (quintela@redhat.com) wrote:
> We just send the address through the alternate channels and test that it
> is ok.

So this is just a debug patch?

> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/ram.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 50 insertions(+)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index 49c4880..b55b243 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -468,8 +468,26 @@ static void *multifd_send_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
>              qemu_mutex_unlock(&p->mutex);
> +
> +            for (i = 0; i < num; i++) {
> +                if (qio_channel_write(p->c,
> +                                      (const char *)&p->pages.iov[i].iov_base,
> +                                      sizeof(uint8_t *), &error_abort)

Never abort on the source.

> +                    != sizeof(uint8_t *)) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_send_threads();

Is it really safe to call terminate_multifd_send_threads from one of the
threads?  It feels like having set it to FAILED all the cleanup should
happen back in the main thread.

> +                    return NULL;
> +                }
> +            }
>              qemu_mutex_lock(&multifd_send_state->mutex);
>              p->done = true;
>              qemu_mutex_unlock(&multifd_send_state->mutex);
> @@ -636,6 +654,7 @@ void multifd_load_cleanup(void)
>  static void *multifd_recv_thread(void *opaque)
>  {
>      MultiFDRecvParams *p = opaque;
> +    uint8_t *recv_address;
>  
>      qemu_sem_post(&p->ready);
>      while (true) {
> @@ -645,7 +664,38 @@ static void *multifd_recv_thread(void *opaque)
>              break;
>          }
>          if (p->pages.num) {
> +            int i;
> +            int num;
> +
> +            num = p->pages.num;
>              p->pages.num = 0;
> +
> +            for (i = 0; i < num; i++) {
> +                if (qio_channel_read(p->c,
> +                                     (char *)&recv_address,
> +                                     sizeof(uint8_t *), &error_abort)

and I'd prefer we didn't abort on the dest either, but a bit less
critical (until postcopy?)

> +                    != sizeof(uint8_t *)) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_recv_threads();
> +                    return NULL;
> +                }
> +                if (recv_address != p->pages.iov[i].iov_base) {
> +                    MigrationState *s = migrate_get_current();
> +
> +                    printf("We received %p what we were expecting %p (%d)\n",
> +                           recv_address,
> +                           p->pages.iov[i].iov_base, i);
> +
> +                    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
> +                                      MIGRATION_STATUS_FAILED);
> +                    terminate_multifd_recv_threads();
> +                    return NULL;
> +                }
> +            }
> +
>              p->done = true;
>              qemu_mutex_unlock(&p->mutex);
>              qemu_sem_post(&p->ready);

Dave
> -- 
> 2.9.4
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK