Do send_prepare() and the filling of the packet header without the
mutex held. This will help a lot for compression and, later in the
series, for zero pages.
Notice that we can use p->pages without holding p->mutex because
p->pending_job == 1.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/multifd.h | 2 ++
migration/multifd.c | 11 ++++++-----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/migration/multifd.h b/migration/multifd.h
index a67cefc0a2..cd389d18d2 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -109,7 +109,9 @@ typedef struct {
 
     /* array of pages to sent.
      * The owner of 'pages' depends of 'pending_job' value:
      * pending_job == 0 -> migration_thread can use it.
+     * No need for mutex lock.
      * pending_job != 0 -> multifd_channel can use it.
+     * No need for mutex lock.
      */
     MultiFDPages_t *pages;
diff --git a/migration/multifd.c b/migration/multifd.c
index 09a40a9135..68fc9f8e88 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -663,6 +663,8 @@ static void *multifd_send_thread(void *opaque)
                 p->flags |= MULTIFD_FLAG_SYNC;
                 p->sync_needed = false;
             }
+            qemu_mutex_unlock(&p->mutex);
+
             p->normal_num = 0;
 
             if (use_zero_copy_send) {
@@ -684,11 +686,6 @@ static void *multifd_send_thread(void *opaque)
                 }
             }
             multifd_send_fill_packet(p);
-            p->num_packets++;
-            p->total_normal_pages += p->normal_num;
-            p->pages->num = 0;
-            p->pages->block = NULL;
-            qemu_mutex_unlock(&p->mutex);
 
             trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
                                p->next_packet_size);
@@ -713,6 +710,10 @@ static void *multifd_send_thread(void *opaque)
             }
 
             qemu_mutex_lock(&p->mutex);
+            p->num_packets++;
+            p->total_normal_pages += p->normal_num;
+            p->pages->num = 0;
+            p->pages->block = NULL;
             p->sent_bytes += p->packet_len;;
             p->sent_bytes += p->next_packet_size;
             p->pending_job--;
--
2.37.1
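
For context, a minimal, self-contained sketch (plain pthreads,
hypothetical names, not the actual QEMU code) of the ownership-handoff
pattern the commit message relies on: while pending_job is nonzero the
channel thread owns 'pages' and may touch it without the mutex; the
lock only guards the handoff itself.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int pending_job;     /* 0 -> producer owns pages; != 0 -> worker owns */
    int pages[16];       /* stand-in for MultiFDPages_t */
    int num;
    bool quit;
} Channel;

static void *worker(void *opaque)
{
    Channel *c = opaque;

    for (;;) {
        pthread_mutex_lock(&c->mutex);
        while (!c->pending_job && !c->quit) {
            pthread_cond_wait(&c->cond, &c->mutex);
        }
        if (c->quit) {
            pthread_mutex_unlock(&c->mutex);
            return NULL;
        }
        pthread_mutex_unlock(&c->mutex);

        /* pending_job != 0: this thread owns c->pages, so the expensive
         * part (send_prepare()/packet fill in the real code) needs no lock. */
        for (int i = 0; i < c->num; i++) {
            printf("sending page %d\n", c->pages[i]);
        }

        pthread_mutex_lock(&c->mutex);
        c->num = 0;                    /* hand the empty array back */
        c->pending_job--;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->mutex);
    }
}

int main(void)
{
    Channel c = {
        .mutex = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
    };
    pthread_t tid;

    pthread_create(&tid, NULL, worker, &c);

    /* producer side (the migration thread in the real code) */
    pthread_mutex_lock(&c.mutex);
    for (int i = 0; i < 4; i++) {
        c.pages[c.num++] = i;
    }
    c.pending_job++;
    pthread_cond_signal(&c.cond);
    pthread_mutex_unlock(&c.mutex);

    /* wait until the worker hands ownership back, then shut down */
    pthread_mutex_lock(&c.mutex);
    while (c.pending_job) {
        pthread_cond_wait(&c.cond, &c.mutex);
    }
    c.quit = true;
    pthread_cond_signal(&c.cond);
    pthread_mutex_unlock(&c.mutex);

    pthread_join(tid, NULL);
    return 0;
}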
On Tue, 2022-08-02 at 08:39 +0200, Juan Quintela wrote:
> Do send_prepare() and the filling of the packet header without the
> mutex held. This will help a lot for compression and, later in the
> series, for zero pages.
>
> Notice that we can use p->pages without holding p->mutex because
> p->pending_job == 1.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
> ---
> migration/multifd.h | 2 ++
> migration/multifd.c | 11 ++++++-----
> 2 files changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index a67cefc0a2..cd389d18d2 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -109,7 +109,9 @@ typedef struct {
> 
>      /* array of pages to sent.
>       * The owner of 'pages' depends of 'pending_job' value:
>       * pending_job == 0 -> migration_thread can use it.
> +     * No need for mutex lock.
>       * pending_job != 0 -> multifd_channel can use it.
> +     * No need for mutex lock.
>       */
>      MultiFDPages_t *pages;
> 
> diff --git a/migration/multifd.c b/migration/multifd.c
> index 09a40a9135..68fc9f8e88 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -663,6 +663,8 @@ static void *multifd_send_thread(void *opaque)
>                  p->flags |= MULTIFD_FLAG_SYNC;
>                  p->sync_needed = false;
>              }
> +            qemu_mutex_unlock(&p->mutex);
> +
If it unlocks here, this will run unprotected:

for (int i = 0; i < p->pages->num; i++) {
    p->normal[p->normal_num] = p->pages->offset[i];
    p->normal_num++;
}

And p->pages seems to belong to the mutex-protected area.
Should that be ok?
Also, below that we have:

if (p->normal_num) {
    ret = multifd_send_state->ops->send_prepare(p, &local_err);
    if (ret != 0) {
        qemu_mutex_unlock(&p->mutex);
        break;
    }
}

Could calling mutex_unlock() here, even though the unlock already
happened above, cause any issue?
>              p->normal_num = 0;
> 
>              if (use_zero_copy_send) {
> @@ -684,11 +686,6 @@ static void *multifd_send_thread(void *opaque)
>                  }
>              }
>              multifd_send_fill_packet(p);
> -            p->num_packets++;
> -            p->total_normal_pages += p->normal_num;
> -            p->pages->num = 0;
> -            p->pages->block = NULL;
> -            qemu_mutex_unlock(&p->mutex);
> 
>              trace_multifd_send(p->id, packet_num, p->normal_num, p->flags,
>                                 p->next_packet_size);
> @@ -713,6 +710,10 @@ static void *multifd_send_thread(void *opaque)
>              }
> 
>              qemu_mutex_lock(&p->mutex);
> +            p->num_packets++;
> +            p->total_normal_pages += p->normal_num;
> +            p->pages->num = 0;
> +            p->pages->block = NULL;
>              p->sent_bytes += p->packet_len;;
>              p->sent_bytes += p->next_packet_size;
>              p->pending_job--;
Those fields are not used in the interval, so this part seems ok.
Best regards,
Leo
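
For reference on the second question: unlocking a mutex the thread does
not hold is undefined behavior for default pthread mutexes, and an
EPERM error for error-checking ones; if I read util/qemu-thread-posix.c
right, qemu_mutex_unlock() exits on any pthread error, so a debug build
(which uses error-checking mutexes) would abort on the stray unlock. A
small standalone program that makes the hazard visible:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t m;
    int ret;

    pthread_mutexattr_init(&attr);
    /* The error-checking type reports the misuse; the default type
     * makes it undefined behavior instead. */
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);

    pthread_mutex_lock(&m);
    pthread_mutex_unlock(&m);

    /* Second unlock: this thread no longer owns the mutex. */
    ret = pthread_mutex_unlock(&m);
    printf("second unlock -> %s\n", ret ? strerror(ret) : "ok");

    pthread_mutex_destroy(&m);
    pthread_mutexattr_destroy(&attr);
    return 0;
}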
Leonardo Brás <leobras@redhat.com> wrote:
> On Tue, 2022-08-02 at 08:39 +0200, Juan Quintela wrote:
>> [...]
>> @@ -663,6 +663,8 @@ static void *multifd_send_thread(void *opaque)
>>                  p->flags |= MULTIFD_FLAG_SYNC;
>>                  p->sync_needed = false;
>>              }
>> +            qemu_mutex_unlock(&p->mutex);
>> +
>
> If it unlocks here, this will run unprotected:
>
> for (int i = 0; i < p->pages->num; i++) {
>     p->normal[p->normal_num] = p->pages->offset[i];
>     p->normal_num++;
> }
>
> And p->pages seems to belong to the mutex-protected area.
> Should that be ok?
From the documentation:
    /* array of pages to sent.
     * The owner of 'pages' depends of 'pending_job' value:
     * pending_job == 0 -> migration_thread can use it.
     * No need for mutex lock.
     * pending_job != 0 -> multifd_channel can use it.
     * No need for mutex lock.
     */
    MultiFDPages_t *pages;
So, it is right.
> Also, below that we have:
>
> if (p->normal_num) {
>     ret = multifd_send_state->ops->send_prepare(p, &local_err);
>     if (ret != 0) {
>         qemu_mutex_unlock(&p->mutex);
>         break;
>     }
> }
>
> Could calling mutex_unlock() here, even though the unlock already
> happened above, cause any issue?
Good catch. Never got an error there.
Removing that bit.
> Best regards,
Thanks, Juan.
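
To make the agreed fix concrete, a condensed, compilable sketch (stub
types and a hypothetical send_prepare(), not the actual follow-up
patch) of how the flow reads once that stray unlock is dropped: the
mutex is released right after the flags are set, so the error path may
only break, never unlock again.

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t mutex;
    bool sync_needed;
    unsigned flags;
    int normal_num;
} Channel;

/* stand-in for multifd_send_state->ops->send_prepare() */
static int send_prepare(Channel *p)
{
    (void)p;
    return 0;
}

static void send_loop(Channel *p)
{
    for (;;) {
        pthread_mutex_lock(&p->mutex);
        if (p->sync_needed) {
            p->flags |= 1u;          /* MULTIFD_FLAG_SYNC stand-in */
            p->sync_needed = false;
        }
        /* Drop the lock before the expensive part... */
        pthread_mutex_unlock(&p->mutex);

        if (p->normal_num && send_prepare(p) != 0) {
            break;  /* ...so the error path must not unlock again */
        }

        /* fill the packet and write it out here (elided) */

        pthread_mutex_lock(&p->mutex);
        /* counters and p->pages are handed back under the lock */
        pthread_mutex_unlock(&p->mutex);
        break;      /* single pass for the sketch */
    }
}

int main(void)
{
    Channel p = { .mutex = PTHREAD_MUTEX_INITIALIZER };

    send_loop(&p);
    return 0;
}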
On Fri, Aug 19, 2022 at 8:32 AM Juan Quintela <quintela@redhat.com> wrote:
>
> Leonardo Brás <leobras@redhat.com> wrote:
> > On Tue, 2022-08-02 at 08:39 +0200, Juan Quintela wrote:
> >> [...]
> > If it unlocks here, this will run unprotected:
> >
> > for (int i = 0; i < p->pages->num; i++) {
> >     p->normal[p->normal_num] = p->pages->offset[i];
> >     p->normal_num++;
> > }
> >
> > And p->pages seems to belong to the mutex-protected area.
> > Should that be ok?
>
> From the documentation:
>
>     /* array of pages to sent.
>      * The owner of 'pages' depends of 'pending_job' value:
>      * pending_job == 0 -> migration_thread can use it.
>      * No need for mutex lock.
>      * pending_job != 0 -> multifd_channel can use it.
>      * No need for mutex lock.
>      */
>     MultiFDPages_t *pages;
>
> So, it is right.
Oh, right. I missed that part earlier.
>
> > Also, below that we have:
> >
> > if (p->normal_num) {
> >     ret = multifd_send_state->ops->send_prepare(p, &local_err);
> >     if (ret != 0) {
> >         qemu_mutex_unlock(&p->mutex);
> >         break;
> >     }
> > }
> >
> > Could calling mutex_unlock() here, even though the unlock already
> > happened above, cause any issue?
>
> Good catch. Never got an error there.
>
> Removing that bit.
Thanks!
Best regards,
Leo