[PATCH v7 08/13] multifd: Prepare to send a packet without the mutex held

Juan Quintela posted 13 patches 2 years, 5 months ago
Maintainers: Eduardo Habkost <eduardo@habkost.net>, Marcel Apfelbaum <marcel.apfelbaum@gmail.com>, "Philippe Mathieu-Daudé" <f4bug@amsat.org>, Yanan Wang <wangyanan55@huawei.com>, Juan Quintela <quintela@redhat.com>, "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Posted by Juan Quintela 2 years, 5 months ago
We do send_prepare() and fill the packet header without the mutex
held.  This will help a lot for compression, and later in the series
for zero pages.

Notice that we can use p->pages without holding p->mutex because
p->pending_job == 1.
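
To make the new lock ordering concrete, here is a minimal, self-contained
sketch of the pattern this patch moves to.  It is not the QEMU code: the
Channel struct and the prepare_and_fill()/write_packet() helpers are
illustrative stand-ins for MultiFDSendParams and the multifd send hooks,
but the lock/unlock placement mirrors the patch.

/* Sketch only: simplified stand-ins, not the real multifd code. */
#include <pthread.h>
#include <stdint.h>

typedef struct {
    pthread_mutex_t mutex;
    int pending_job;            /* != 0: this thread owns 'pages'        */
    void *pages;                /* touched only by the current owner     */
    uint32_t normal_num;
    uint64_t num_packets;
    uint64_t total_normal_pages;
} Channel;

/* stand-ins for send_prepare()/multifd_send_fill_packet() and the I/O */
static void prepare_and_fill(Channel *p) { (void)p; }
static void write_packet(Channel *p)     { (void)p; }

static void send_one(Channel *p)
{
    pthread_mutex_lock(&p->mutex);
    /* read and update per-packet flags (e.g. the sync flag) under the lock */
    pthread_mutex_unlock(&p->mutex);

    /*
     * pending_job is still non-zero here, so this thread owns p->pages and
     * may prepare the packet and do the I/O without holding the mutex.
     */
    prepare_and_fill(p);
    write_packet(p);

    pthread_mutex_lock(&p->mutex);
    /* counters and handing 'pages' back go under the re-taken lock */
    p->num_packets++;
    p->total_normal_pages += p->normal_num;
    p->pending_job--;
    pthread_mutex_unlock(&p->mutex);
}

int main(void)
{
    Channel p = { .pending_job = 1 };

    pthread_mutex_init(&p.mutex, NULL);
    send_one(&p);
    pthread_mutex_destroy(&p.mutex);
    return 0;
}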

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 migration/multifd.h |  2 ++
 migration/multifd.c | 11 ++++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/migration/multifd.h b/migration/multifd.h
index af8ce8921d..d48597a1ea 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -109,7 +109,9 @@ typedef struct {
     /* array of pages to sent.
      * The owner of 'pages' depends of 'pending_job' value:
      * pending_job == 0 -> migration_thread can use it.
+     *                     No need for mutex lock.
      * pending_job != 0 -> multifd_channel can use it.
+     *                     No need for mutex lock.
      */
     MultiFDPages_t *pages;
 
diff --git a/migration/multifd.c b/migration/multifd.c
index 69b9d7cf98..056599cbaf 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -661,6 +661,8 @@ static void *multifd_send_thread(void *opaque)
                 p->flags |= MULTIFD_FLAG_SYNC;
                 p->sync_needed = false;
             }
+            qemu_mutex_unlock(&p->mutex);
+
             p->normal_num = 0;
 
             if (use_zero_copy_send) {
@@ -682,11 +684,6 @@ static void *multifd_send_thread(void *opaque)
                 }
             }
             multifd_send_fill_packet(p);
-            p->num_packets++;
-            p->total_normal_pages += p->normal_num;
-            p->pages->num = 0;
-            p->pages->block = NULL;
-            qemu_mutex_unlock(&p->mutex);
 
             trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
                                p->next_packet_size);
@@ -711,6 +708,10 @@ static void *multifd_send_thread(void *opaque)
             }
 
             qemu_mutex_lock(&p->mutex);
+            p->num_packets++;
+            p->total_normal_pages += p->normal_num;
+            p->pages->num = 0;
+            p->pages->block = NULL;
             p->sent_bytes += p->packet_len;;
             p->sent_bytes += p->next_packet_size;
             p->pending_job--;
-- 
2.35.3
Re: [PATCH v7 08/13] multifd: Prepare to send a packet without the mutex held
Posted by Dr. David Alan Gilbert 2 years, 3 months ago
* Juan Quintela (quintela@redhat.com) wrote:
> We do send_prepare() and fill the packet header without the mutex
> held.  This will help a lot for compression, and later in the series
> for zero pages.
> 
> Notice that we can use p->pages without holding p->mutex because
> p->pending_job == 1.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
>  migration/multifd.h |  2 ++
>  migration/multifd.c | 11 ++++++-----
>  2 files changed, 8 insertions(+), 5 deletions(-)
> 
> diff --git a/migration/multifd.h b/migration/multifd.h
> index af8ce8921d..d48597a1ea 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -109,7 +109,9 @@ typedef struct {
>      /* array of pages to sent.
>       * The owner of 'pages' depends of 'pending_job' value:
>       * pending_job == 0 -> migration_thread can use it.
> +     *                     No need for mutex lock.
>       * pending_job != 0 -> multifd_channel can use it.
> +     *                     No need for mutex lock.
>       */
>      MultiFDPages_t *pages;
>  
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 69b9d7cf98..056599cbaf 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -661,6 +661,8 @@ static void *multifd_send_thread(void *opaque)
>                  p->flags |= MULTIFD_FLAG_SYNC;
>                  p->sync_needed = false;
>              }
> +            qemu_mutex_unlock(&p->mutex);
> +
>              p->normal_num = 0;
>  
>              if (use_zero_copy_send) {
> @@ -682,11 +684,6 @@ static void *multifd_send_thread(void *opaque)

Looking at my source tree, without the rest of your series, I see an
unlock just before this pair of }'s; doesn't that need to go?

Dave
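
For anyone reading the archive without the tree at hand: the patch already
drops p->mutex right after the sync handling, so an unlock left behind
further down that block (for example on an error path) would release a
mutex this thread no longer holds.  Below is a standalone illustration of
why that matters; the error-checking pthread mutex is only a stand-in, but
QEMU's qemu_mutex_unlock() likewise checks the pthread return value and
aborts if it reports an error.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_mutex_t m;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);

    pthread_mutex_lock(&m);
    printf("first unlock:  %d\n", pthread_mutex_unlock(&m));   /* 0 */

    int ret = pthread_mutex_unlock(&m);       /* mutex is no longer held */
    printf("second unlock: %d (EPERM is %d)\n", ret, EPERM);

    pthread_mutexattr_destroy(&attr);
    pthread_mutex_destroy(&m);
    return 0;
}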

>                  }
>              }
>              multifd_send_fill_packet(p);
> -            p->num_packets++;
> -            p->total_normal_pages += p->normal_num;
> -            p->pages->num = 0;
> -            p->pages->block = NULL;
> -            qemu_mutex_unlock(&p->mutex);
>  
>              trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
>                                 p->next_packet_size);
> @@ -711,6 +708,10 @@ static void *multifd_send_thread(void *opaque)
>              }
>  
>              qemu_mutex_lock(&p->mutex);
> +            p->num_packets++;
> +            p->total_normal_pages += p->normal_num;
> +            p->pages->num = 0;
> +            p->pages->block = NULL;
>              p->sent_bytes += p->packet_len;;
>              p->sent_bytes += p->next_packet_size;
>              p->pending_job--;
> -- 
> 2.35.3
> 
-- 
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK