From: Peter Xu <peterx@redhat.com>
When reviewing my attempt to refactor send_prepare(), Fabiano suggested we
try dropping the mutex in the multifd code [1].
I thought about that before but I never tried to change the code. Now
maybe it's time to give it a stab. This only optimizes the sender side.
The trick here is multifd has a clear provider/consumer model, where the
migration main thread publishes requests (either pending_job/pending_sync),
while the multifd sender threads are consumers. Here we don't have a lot
of comlicated data sharing, and the jobs can logically be submitted lockless.
Arm the code with atomic weapons. Two things worth mentioning:
- For multifd_send_pages(): we can use qatomic_load_acquire() when trying
to find a free channel, but that's expensive if we attach one ACQUIRE per
channel. Instead, make it atomic_read() on the pending_job flag, but
merge the ACQUIRE into one single smp_mb_acquire() later.
- For pending_sync: it doesn't have any extra data attached, and since
p->flags is never touched now, it should be safe to not use a memory barrier.
That's different from pending_sync.
Provide rich comments for all the lockless operations to state how they are
paired. With that, we can remove the mutex.
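To illustrate the pairing in a reduced form (a sketch only, not the real
functions: publish_job()/consume_one() are made-up names, the channel
search, packet setup and error handling are all omitted, and it assumes
the usual "qemu/osdep.h" + "migration/multifd.h" includes):

    /* Producer side, roughly what multifd_send_pages() does: */
    static void publish_job(MultiFDSendParams *p, MultiFDPages_t *pages)
    {
        p->pages = pages;                   /* (1) set up the payload */
        /* (2) raise the flag; the RELEASE orders (1) before (2) */
        qatomic_store_release(&p->pending_job, true);
        qemu_sem_post(&p->sem);
    }

    /* Consumer side, roughly one iteration of multifd_send_thread(): */
    static void consume_one(MultiFDSendParams *p)
    {
        qemu_sem_wait(&p->sem);
        /*
         * The ACQUIRE pairs with the RELEASE above: observing
         * pending_job==true guarantees the p->pages written before it
         * is visible too.
         */
        if (qatomic_load_acquire(&p->pending_job)) {
            /* ... send p->pages out here ... */
            /* publish the recycled p->pages before declaring "free" */
            qatomic_store_release(&p->pending_job, false);
        } else {
            /* pending_sync carries no payload, so plain atomics do */
            assert(qatomic_read(&p->pending_sync));
            /* ... send the SYNC packet here ... */
            qatomic_set(&p->pending_sync, false);
        }
    }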
[1] https://lore.kernel.org/r/87o7d1jlu5.fsf@suse.de
Suggested-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/multifd.h | 2 --
migration/multifd.c | 51 +++++++++++++++++++++++----------------------
2 files changed, 26 insertions(+), 27 deletions(-)
diff --git a/migration/multifd.h b/migration/multifd.h
index 98876ff94a..78a2317263 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -91,8 +91,6 @@ typedef struct {
/* syncs main thread and channels */
QemuSemaphore sem_sync;
- /* this mutex protects the following parameters */
- QemuMutex mutex;
/* is this channel thread running */
bool running;
/* multifd flags for each packet */
diff --git a/migration/multifd.c b/migration/multifd.c
index b317d57d61..ef13e2e781 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -501,19 +501,19 @@ static bool multifd_send_pages(void)
}
}
- qemu_mutex_lock(&p->mutex);
- assert(!p->pages->num);
- assert(!p->pages->block);
/*
- * Double check on pending_job==false with the lock. In the future if
- * we can have >1 requester thread, we can replace this with a "goto
- * retry", but that is for later.
+ * Make sure we read p->pending_job before all the rest. Pairs with
+ * qatomic_store_release() in multifd_send_thread().
*/
- assert(qatomic_read(&p->pending_job) == false);
- qatomic_set(&p->pending_job, true);
+ smp_mb_acquire();
+ assert(!p->pages->num);
multifd_send_state->pages = p->pages;
p->pages = pages;
- qemu_mutex_unlock(&p->mutex);
+ /*
+ * Making sure p->pages is setup before marking pending_job=true. Pairs
+ * with the qatomic_load_acquire() in multifd_send_thread().
+ */
+ qatomic_store_release(&p->pending_job, true);
qemu_sem_post(&p->sem);
return true;
@@ -648,7 +648,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
}
multifd_send_channel_destroy(p->c);
p->c = NULL;
- qemu_mutex_destroy(&p->mutex);
qemu_sem_destroy(&p->sem);
qemu_sem_destroy(&p->sem_sync);
g_free(p->name);
@@ -742,14 +741,12 @@ int multifd_send_sync_main(void)
trace_multifd_send_sync_main_signal(p->id);
- qemu_mutex_lock(&p->mutex);
/*
* We should be the only user so far, so not possible to be set by
* others concurrently.
*/
assert(qatomic_read(&p->pending_sync) == false);
qatomic_set(&p->pending_sync, true);
- qemu_mutex_unlock(&p->mutex);
qemu_sem_post(&p->sem);
}
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -796,9 +793,12 @@ static void *multifd_send_thread(void *opaque)
if (multifd_send_should_exit()) {
break;
}
- qemu_mutex_lock(&p->mutex);
- if (qatomic_read(&p->pending_job)) {
+ /*
+ * Read pending_job flag before p->pages. Pairs with the
+ * qatomic_store_release() in multifd_send_pages().
+ */
+ if (qatomic_load_acquire(&p->pending_job)) {
MultiFDPages_t *pages = p->pages;
p->iovs_num = 0;
@@ -806,14 +806,12 @@ static void *multifd_send_thread(void *opaque)
ret = multifd_send_state->ops->send_prepare(p, &local_err);
if (ret != 0) {
- qemu_mutex_unlock(&p->mutex);
break;
}
ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
0, p->write_flags, &local_err);
if (ret != 0) {
- qemu_mutex_unlock(&p->mutex);
break;
}
@@ -822,24 +820,31 @@ static void *multifd_send_thread(void *opaque)
multifd_pages_reset(p->pages);
p->next_packet_size = 0;
- qatomic_set(&p->pending_job, false);
- qemu_mutex_unlock(&p->mutex);
+
+ /*
+ * Making sure p->pages is published before saying "we're
+ * free". Pairs with the qatomic_load_acquire() in
+ * multifd_send_pages().
+ */
+ qatomic_store_release(&p->pending_job, false);
} else {
- /* If not a normal job, must be a sync request */
+ /*
+ * If not a normal job, must be a sync request. Note that
+ * pending_sync is a standalone flag (unlike pending_job), so
+ * it doesn't require explicit memory barriers.
+ */
assert(qatomic_read(&p->pending_sync));
p->flags = MULTIFD_FLAG_SYNC;
multifd_send_fill_packet(p);
ret = qio_channel_write_all(p->c, (void *)p->packet,
p->packet_len, &local_err);
if (ret != 0) {
- qemu_mutex_unlock(&p->mutex);
break;
}
/* p->next_packet_size will always be zero for a SYNC packet */
stat64_add(&mig_stats.multifd_bytes, p->packet_len);
p->flags = 0;
qatomic_set(&p->pending_sync, false);
- qemu_mutex_unlock(&p->mutex);
qemu_sem_post(&p->sem_sync);
}
}
@@ -853,10 +858,7 @@ out:
error_free(local_err);
}
- qemu_mutex_lock(&p->mutex);
p->running = false;
- qemu_mutex_unlock(&p->mutex);
-
rcu_unregister_thread();
migration_threads_remove(thread);
trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages);
@@ -998,7 +1000,6 @@ int multifd_send_setup(Error **errp)
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
- qemu_mutex_init(&p->mutex);
qemu_sem_init(&p->sem, 0);
qemu_sem_init(&p->sem_sync, 0);
p->id = i;
--
2.43.0
peterx@redhat.com writes:
> From: Peter Xu <peterx@redhat.com>
>
> When reviewing my attempt to refactor send_prepare(), Fabiano suggested we
> try dropping the mutex in the multifd code [1].
>
> I thought about that before but I never tried to change the code. Now
> maybe it's time to give it a stab. This only optimizes the sender side.
>
> The trick here is multifd has a clear provider/consumer model, where the
> migration main thread publishes requests (either pending_job/pending_sync),
> while the multifd sender threads are consumers. Here we don't have a lot
> of comlicated data sharing, and the jobs can logically be submitted lockless.
complicated
>
> Arm the code with atomic weapons. Two things worth mentioning:
>
> - For multifd_send_pages(): we can use qatomic_load_acquire() when trying
> to find a free channel, but that's expensive if we attach one ACQUIRE per
> channel. Instead, make it atomic_read() on the pending_job flag, but
s/make it/keep it/
The diff doesn't show the atomic_read already there so it's confusing.
> merge the ACQUIRE into one single smp_mb_acquire() later.
>
> - For pending_sync: it doesn't have any extra data attached, and since
> p->flags is never touched now, it should be safe to not use a memory barrier.
> That's different from pending_sync.
pending_job?
>
> Provide rich comments for all the lockless operations to state how they are
> paired. With that, we can remove the mutex.
>
> [1] https://lore.kernel.org/r/87o7d1jlu5.fsf@suse.de
>
> Suggested-by: Fabiano Rosas <farosas@suse.de>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> migration/multifd.h | 2 --
> migration/multifd.c | 51 +++++++++++++++++++++++----------------------
> 2 files changed, 26 insertions(+), 27 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index 98876ff94a..78a2317263 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -91,8 +91,6 @@ typedef struct {
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
>
> - /* this mutex protects the following parameters */
> - QemuMutex mutex;
> /* is this channel thread running */
> bool running;
> /* multifd flags for each packet */
> diff --git a/migration/multifd.c b/migration/multifd.c
> index b317d57d61..ef13e2e781 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -501,19 +501,19 @@ static bool multifd_send_pages(void)
> }
> }
>
> - qemu_mutex_lock(&p->mutex);
> - assert(!p->pages->num);
> - assert(!p->pages->block);
> /*
> - * Double check on pending_job==false with the lock. In the future if
> - * we can have >1 requester thread, we can replace this with a "goto
> - * retry", but that is for later.
> + * Make sure we read p->pending_job before all the rest. Pairs with
> + * qatomic_store_release() in multifd_send_thread().
> */
> - assert(qatomic_read(&p->pending_job) == false);
> - qatomic_set(&p->pending_job, true);
> + smp_mb_acquire();
> + assert(!p->pages->num);
> multifd_send_state->pages = p->pages;
> p->pages = pages;
> - qemu_mutex_unlock(&p->mutex);
> + /*
> + * Making sure p->pages is setup before marking pending_job=true. Pairs
> + * with the qatomic_load_acquire() in multifd_send_thread().
> + */
> + qatomic_store_release(&p->pending_job, true);
> qemu_sem_post(&p->sem);
>
> return true;
> @@ -648,7 +648,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
> }
> multifd_send_channel_destroy(p->c);
> p->c = NULL;
> - qemu_mutex_destroy(&p->mutex);
> qemu_sem_destroy(&p->sem);
> qemu_sem_destroy(&p->sem_sync);
> g_free(p->name);
> @@ -742,14 +741,12 @@ int multifd_send_sync_main(void)
>
> trace_multifd_send_sync_main_signal(p->id);
>
> - qemu_mutex_lock(&p->mutex);
> /*
> * We should be the only user so far, so not possible to be set by
> * others concurrently.
> */
> assert(qatomic_read(&p->pending_sync) == false);
> qatomic_set(&p->pending_sync, true);
> - qemu_mutex_unlock(&p->mutex);
> qemu_sem_post(&p->sem);
> }
> for (i = 0; i < migrate_multifd_channels(); i++) {
> @@ -796,9 +793,12 @@ static void *multifd_send_thread(void *opaque)
> if (multifd_send_should_exit()) {
> break;
> }
> - qemu_mutex_lock(&p->mutex);
>
> - if (qatomic_read(&p->pending_job)) {
> + /*
> + * Read pending_job flag before p->pages. Pairs with the
> + * qatomic_store_release() in multifd_send_pages().
> + */
> + if (qatomic_load_acquire(&p->pending_job)) {
> MultiFDPages_t *pages = p->pages;
>
> p->iovs_num = 0;
> @@ -806,14 +806,12 @@ static void *multifd_send_thread(void *opaque)
>
> ret = multifd_send_state->ops->send_prepare(p, &local_err);
> if (ret != 0) {
> - qemu_mutex_unlock(&p->mutex);
> break;
> }
>
> ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
> 0, p->write_flags, &local_err);
> if (ret != 0) {
> - qemu_mutex_unlock(&p->mutex);
> break;
> }
>
> @@ -822,24 +820,31 @@ static void *multifd_send_thread(void *opaque)
>
> multifd_pages_reset(p->pages);
> p->next_packet_size = 0;
> - qatomic_set(&p->pending_job, false);
> - qemu_mutex_unlock(&p->mutex);
> +
> + /*
> + * Making sure p->pages is published before saying "we're
> + * free". Pairs with the qatomic_load_acquire() in
smp_mb_acquire()
> + * multifd_send_pages().
> + */
> + qatomic_store_release(&p->pending_job, false);
> } else {
> - /* If not a normal job, must be a sync request */
> + /*
> + * If not a normal job, must be a sync request. Note that
> + * pending_sync is a standalone flag (unlike pending_job), so
> + * it doesn't require explicit memory barriers.
> + */
> assert(qatomic_read(&p->pending_sync));
> p->flags = MULTIFD_FLAG_SYNC;
> multifd_send_fill_packet(p);
> ret = qio_channel_write_all(p->c, (void *)p->packet,
> p->packet_len, &local_err);
> if (ret != 0) {
> - qemu_mutex_unlock(&p->mutex);
> break;
> }
> /* p->next_packet_size will always be zero for a SYNC packet */
> stat64_add(&mig_stats.multifd_bytes, p->packet_len);
> p->flags = 0;
> qatomic_set(&p->pending_sync, false);
> - qemu_mutex_unlock(&p->mutex);
> qemu_sem_post(&p->sem_sync);
> }
> }
> @@ -853,10 +858,7 @@ out:
> error_free(local_err);
> }
>
> - qemu_mutex_lock(&p->mutex);
> p->running = false;
> - qemu_mutex_unlock(&p->mutex);
> -
> rcu_unregister_thread();
> migration_threads_remove(thread);
> trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages);
> @@ -998,7 +1000,6 @@ int multifd_send_setup(Error **errp)
> for (i = 0; i < thread_count; i++) {
> MultiFDSendParams *p = &multifd_send_state->params[i];
>
> - qemu_mutex_init(&p->mutex);
> qemu_sem_init(&p->sem, 0);
> qemu_sem_init(&p->sem_sync, 0);
> p->id = i;
On Fri, Feb 02, 2024 at 06:34:08PM -0300, Fabiano Rosas wrote:
> peterx@redhat.com writes:
>
> > From: Peter Xu <peterx@redhat.com>
> >
> > When reviewing my attempt to refactor send_prepare(), Fabiano suggested we
> > try dropping the mutex in the multifd code [1].
> >
> > I thought about that before but I never tried to change the code. Now
> > maybe it's time to give it a stab. This only optimizes the sender side.
> >
> > The trick here is multifd has a clear provider/consumer model, where the
> > migration main thread publishes requests (either pending_job/pending_sync),
> > while the multifd sender threads are consumers. Here we don't have a lot
> > of comlicated data sharing, and the jobs can logically be submitted lockless.
>
> complicated
>
> >
> > Arm the code with atomic weapons. Two things worth mentioning:
> >
> > - For multifd_send_pages(): we can use qatomic_load_acquire() when trying
> > to find a free channel, but that's expensive if we attach one ACQUIRE per
> > channel. Instead, make it atomic_read() on the pending_job flag, but
>
> s/make it/keep it/
>
> The diff doesn't show the atomic_read already there so it's confusing.
Right. I also had a trivial typo: s/atomic_read/qatomic_read/..
I tried to rephrase the last sentence:
- For multifd_send_pages(): we can use qatomic_load_acquire() when trying
to find a free channel, but that's expensive if we attach one ACQUIRE per
channel. Instead, keep the qatomic_read() on reading the pending_job
flag as we do already, meanwhile use one smp_mb_acquire() after the loop
to guarantee the memory ordering.
Maybe slightly clearer?
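In code form, the idea is something like below (illustration only:
find_free_channel() is a made-up name, and the real loop is round-robin
and waits on the channels_ready semaphore instead of giving up):

    static MultiFDSendParams *find_free_channel(void)
    {
        MultiFDSendParams *p = NULL;
        int i;

        for (i = 0; i < migrate_multifd_channels(); i++) {
            p = &multifd_send_state->params[i];
            /* plain read while scanning: no barrier per channel */
            if (!qatomic_read(&p->pending_job)) {
                break;
            }
            p = NULL;
        }

        /*
         * One ACQUIRE for the whole scan rather than one per channel.
         * Pairs with the qatomic_store_release(&p->pending_job, false)
         * in the sender thread, so whatever the thread wrote before
         * clearing the flag is visible here, whichever channel we pick.
         */
        smp_mb_acquire();
        return p;
    }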
>
> > merge the ACQUIRE into one single smp_mb_acquire() later.
> >
> > - For pending_sync: it doesn't have any extra data attached, and since
> > p->flags is never touched now, it should be safe to not use a memory barrier.
> > That's different from pending_sync.
>
> pending_job?
Yep, all the rest fixed.
>
> >
> > Provide rich comments for all the lockless operations to state how they are
> > paired. With that, we can remove the mutex.
> >
> > [1] https://lore.kernel.org/r/87o7d1jlu5.fsf@suse.de
> >
> > Suggested-by: Fabiano Rosas <farosas@suse.de>
> > Signed-off-by: Peter Xu <peterx@redhat.com>
> > ---
> > migration/multifd.h | 2 --
> > migration/multifd.c | 51 +++++++++++++++++++++++----------------------
> > 2 files changed, 26 insertions(+), 27 deletions(-)
> >
> > diff --git a/migration/multifd.h b/migration/multifd.h
> > index 98876ff94a..78a2317263 100644
> > --- a/migration/multifd.h
> > +++ b/migration/multifd.h
> > @@ -91,8 +91,6 @@ typedef struct {
> > /* syncs main thread and channels */
> > QemuSemaphore sem_sync;
> >
> > - /* this mutex protects the following parameters */
> > - QemuMutex mutex;
> > /* is this channel thread running */
> > bool running;
> > /* multifd flags for each packet */
> > diff --git a/migration/multifd.c b/migration/multifd.c
> > index b317d57d61..ef13e2e781 100644
> > --- a/migration/multifd.c
> > +++ b/migration/multifd.c
> > @@ -501,19 +501,19 @@ static bool multifd_send_pages(void)
> > }
> > }
> >
> > - qemu_mutex_lock(&p->mutex);
> > - assert(!p->pages->num);
> > - assert(!p->pages->block);
> > /*
> > - * Double check on pending_job==false with the lock. In the future if
> > - * we can have >1 requester thread, we can replace this with a "goto
> > - * retry", but that is for later.
> > + * Make sure we read p->pending_job before all the rest. Pairs with
> > + * qatomic_store_release() in multifd_send_thread().
> > */
> > - assert(qatomic_read(&p->pending_job) == false);
> > - qatomic_set(&p->pending_job, true);
> > + smp_mb_acquire();
> > + assert(!p->pages->num);
> > multifd_send_state->pages = p->pages;
> > p->pages = pages;
> > - qemu_mutex_unlock(&p->mutex);
> > + /*
> > + * Making sure p->pages is setup before marking pending_job=true. Pairs
> > + * with the qatomic_load_acquire() in multifd_send_thread().
> > + */
> > + qatomic_store_release(&p->pending_job, true);
> > qemu_sem_post(&p->sem);
> >
> > return true;
> > @@ -648,7 +648,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
> > }
> > multifd_send_channel_destroy(p->c);
> > p->c = NULL;
> > - qemu_mutex_destroy(&p->mutex);
> > qemu_sem_destroy(&p->sem);
> > qemu_sem_destroy(&p->sem_sync);
> > g_free(p->name);
> > @@ -742,14 +741,12 @@ int multifd_send_sync_main(void)
> >
> > trace_multifd_send_sync_main_signal(p->id);
> >
> > - qemu_mutex_lock(&p->mutex);
> > /*
> > * We should be the only user so far, so not possible to be set by
> > * others concurrently.
> > */
> > assert(qatomic_read(&p->pending_sync) == false);
> > qatomic_set(&p->pending_sync, true);
> > - qemu_mutex_unlock(&p->mutex);
> > qemu_sem_post(&p->sem);
> > }
> > for (i = 0; i < migrate_multifd_channels(); i++) {
> > @@ -796,9 +793,12 @@ static void *multifd_send_thread(void *opaque)
> > if (multifd_send_should_exit()) {
> > break;
> > }
> > - qemu_mutex_lock(&p->mutex);
> >
> > - if (qatomic_read(&p->pending_job)) {
> > + /*
> > + * Read pending_job flag before p->pages. Pairs with the
> > + * qatomic_store_release() in multifd_send_pages().
> > + */
> > + if (qatomic_load_acquire(&p->pending_job)) {
> > MultiFDPages_t *pages = p->pages;
> >
> > p->iovs_num = 0;
> > @@ -806,14 +806,12 @@ static void *multifd_send_thread(void *opaque)
> >
> > ret = multifd_send_state->ops->send_prepare(p, &local_err);
> > if (ret != 0) {
> > - qemu_mutex_unlock(&p->mutex);
> > break;
> > }
> >
> > ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
> > 0, p->write_flags, &local_err);
> > if (ret != 0) {
> > - qemu_mutex_unlock(&p->mutex);
> > break;
> > }
> >
> > @@ -822,24 +820,31 @@ static void *multifd_send_thread(void *opaque)
> >
> > multifd_pages_reset(p->pages);
> > p->next_packet_size = 0;
> > - qatomic_set(&p->pending_job, false);
> > - qemu_mutex_unlock(&p->mutex);
> > +
> > + /*
> > + * Making sure p->pages is published before saying "we're
> > + * free". Pairs with the qatomic_load_acquire() in
>
> smp_mb_acquire()
Fixed.
Any more comments on the code changes before I repost?
(maybe I can repost this single patch in-place to avoid another round of
mail bombs..)
--
Peter Xu
Peter Xu <peterx@redhat.com> writes:
> On Fri, Feb 02, 2024 at 06:34:08PM -0300, Fabiano Rosas wrote:
>> peterx@redhat.com writes:
>>
>> > From: Peter Xu <peterx@redhat.com>
>> >
>> > When reviewing my attempt to refactor send_prepare(), Fabiano suggested we
>> > try dropping the mutex in the multifd code [1].
>> >
>> > I thought about that before but I never tried to change the code. Now
>> > maybe it's time to give it a stab. This only optimizes the sender side.
>> >
>> > The trick here is multifd has a clear provider/consumer model, where the
>> > migration main thread publishes requests (either pending_job/pending_sync),
>> > while the multifd sender threads are consumers. Here we don't have a lot
>> > of comlicated data sharing, and the jobs can logically be submitted lockless.
>>
>> complicated
>>
>> >
>> > Arm the code with atomic weapons. Two things worth mentioning:
>> >
>> > - For multifd_send_pages(): we can use qatomic_load_acquire() when trying
>> > to find a free channel, but that's expensive if we attach one ACQUIRE per
>> > channel. Instead, make it atomic_read() on the pending_job flag, but
>>
>> s/make it/keep it/
>>
>> The diff doesn't show the atomic_read already there so it's confusing.
>
> Right. I also had a trivial typo: s/atomic_read/qatomic_read/..
>
> I tried to rephrase the last sentence:
>
> - For multifd_send_pages(): we can use qatomic_load_acquire() when trying
> to find a free channel, but that's expensive if we attach one ACQUIRE per
> channel. Instead, keep the qatomic_read() on reading the pending_job
> flag as we do already, meanwhile use one smp_mb_acquire() after the loop
> to guarantee the memory ordering.
>
> Maybe slightly clearer?
>
Yep, that's better. Thanks.
>>
>> > merge the ACQUIRE into one single smp_mb_acquire() later.
>> >
>> > - For pending_sync: it doesn't have any extra data attached, and since
>> > p->flags is never touched now, it should be safe to not use a memory barrier.
>> > That's different from pending_sync.
>>
>> pending_job?
>
> Yep, all the rest fixed.
>
>>
>> >
>> > Provide rich comments for all the lockless operations to state how they are
>> > paired. With that, we can remove the mutex.
>> >
>> > [1] https://lore.kernel.org/r/87o7d1jlu5.fsf@suse.de
>> >
>> > Suggested-by: Fabiano Rosas <farosas@suse.de>
>> > Signed-off-by: Peter Xu <peterx@redhat.com>
>> > ---
>> > migration/multifd.h | 2 --
>> > migration/multifd.c | 51 +++++++++++++++++++++++----------------------
>> > 2 files changed, 26 insertions(+), 27 deletions(-)
>> >
>> > diff --git a/migration/multifd.h b/migration/multifd.h
>> > index 98876ff94a..78a2317263 100644
>> > --- a/migration/multifd.h
>> > +++ b/migration/multifd.h
>> > @@ -91,8 +91,6 @@ typedef struct {
>> > /* syncs main thread and channels */
>> > QemuSemaphore sem_sync;
>> >
>> > - /* this mutex protects the following parameters */
>> > - QemuMutex mutex;
>> > /* is this channel thread running */
>> > bool running;
>> > /* multifd flags for each packet */
>> > diff --git a/migration/multifd.c b/migration/multifd.c
>> > index b317d57d61..ef13e2e781 100644
>> > --- a/migration/multifd.c
>> > +++ b/migration/multifd.c
>> > @@ -501,19 +501,19 @@ static bool multifd_send_pages(void)
>> > }
>> > }
>> >
>> > - qemu_mutex_lock(&p->mutex);
>> > - assert(!p->pages->num);
>> > - assert(!p->pages->block);
>> > /*
>> > - * Double check on pending_job==false with the lock. In the future if
>> > - * we can have >1 requester thread, we can replace this with a "goto
>> > - * retry", but that is for later.
>> > + * Make sure we read p->pending_job before all the rest. Pairs with
>> > + * qatomic_store_release() in multifd_send_thread().
>> > */
>> > - assert(qatomic_read(&p->pending_job) == false);
>> > - qatomic_set(&p->pending_job, true);
>> > + smp_mb_acquire();
>> > + assert(!p->pages->num);
>> > multifd_send_state->pages = p->pages;
>> > p->pages = pages;
>> > - qemu_mutex_unlock(&p->mutex);
>> > + /*
>> > + * Making sure p->pages is setup before marking pending_job=true. Pairs
>> > + * with the qatomic_load_acquire() in multifd_send_thread().
>> > + */
>> > + qatomic_store_release(&p->pending_job, true);
>> > qemu_sem_post(&p->sem);
>> >
>> > return true;
>> > @@ -648,7 +648,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
>> > }
>> > multifd_send_channel_destroy(p->c);
>> > p->c = NULL;
>> > - qemu_mutex_destroy(&p->mutex);
>> > qemu_sem_destroy(&p->sem);
>> > qemu_sem_destroy(&p->sem_sync);
>> > g_free(p->name);
>> > @@ -742,14 +741,12 @@ int multifd_send_sync_main(void)
>> >
>> > trace_multifd_send_sync_main_signal(p->id);
>> >
>> > - qemu_mutex_lock(&p->mutex);
>> > /*
>> > * We should be the only user so far, so not possible to be set by
>> > * others concurrently.
>> > */
>> > assert(qatomic_read(&p->pending_sync) == false);
>> > qatomic_set(&p->pending_sync, true);
>> > - qemu_mutex_unlock(&p->mutex);
>> > qemu_sem_post(&p->sem);
>> > }
>> > for (i = 0; i < migrate_multifd_channels(); i++) {
>> > @@ -796,9 +793,12 @@ static void *multifd_send_thread(void *opaque)
>> > if (multifd_send_should_exit()) {
>> > break;
>> > }
>> > - qemu_mutex_lock(&p->mutex);
>> >
>> > - if (qatomic_read(&p->pending_job)) {
>> > + /*
>> > + * Read pending_job flag before p->pages. Pairs with the
>> > + * qatomic_store_release() in multifd_send_pages().
>> > + */
>> > + if (qatomic_load_acquire(&p->pending_job)) {
>> > MultiFDPages_t *pages = p->pages;
>> >
>> > p->iovs_num = 0;
>> > @@ -806,14 +806,12 @@ static void *multifd_send_thread(void *opaque)
>> >
>> > ret = multifd_send_state->ops->send_prepare(p, &local_err);
>> > if (ret != 0) {
>> > - qemu_mutex_unlock(&p->mutex);
>> > break;
>> > }
>> >
>> > ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
>> > 0, p->write_flags, &local_err);
>> > if (ret != 0) {
>> > - qemu_mutex_unlock(&p->mutex);
>> > break;
>> > }
>> >
>> > @@ -822,24 +820,31 @@ static void *multifd_send_thread(void *opaque)
>> >
>> > multifd_pages_reset(p->pages);
>> > p->next_packet_size = 0;
>> > - qatomic_set(&p->pending_job, false);
>> > - qemu_mutex_unlock(&p->mutex);
>> > +
>> > + /*
>> > + * Making sure p->pages is published before saying "we're
>> > + * free". Pairs with the qatomic_load_acquire() in
>>
>> smp_mb_acquire()
>
> Fixed.
>
> Any more comments on the code changes before I repost?
Nope, that's it.
>
> (maybe I can repost this single patch in-place to avoid another round of
> mail bombs..)
Sure.
On Mon, Feb 05, 2024 at 11:10:34AM -0300, Fabiano Rosas wrote:
> > (maybe I can repost this single patch in-place to avoid another round of
> > mail bombs..)
>
> Sure.
I've got the final version attached here. Feel free to have a look, thanks.
====
From 6ba337320430feae4ce9d3d906ea19f68430642d Mon Sep 17 00:00:00 2001
From: Peter Xu <peterx@redhat.com>
Date: Fri, 2 Feb 2024 18:28:57 +0800
Subject: [PATCH] migration/multifd: Optimize sender side to be lockless
When reviewing my attempt to refactor send_prepare(), Fabiano suggested we
try dropping the mutex in the multifd code [1].
I thought about that before but I never tried to change the code. Now
maybe it's time to give it a stab. This only optimizes the sender side.
The trick here is multifd has a clear provider/consumer model, where the
migration main thread publishes requests (either pending_job/pending_sync),
while the multifd sender threads are consumers. Here we don't have a lot
of complicated data sharing, and the jobs can logically be submitted
lockless.
Arm the code with atomic weapons. Two things worth mentioning:
- For multifd_send_pages(): we can use qatomic_load_acquire() when trying
to find a free channel, but that's expensive if we attach one ACQUIRE per
channel. Instead, keep the qatomic_read() on reading the pending_job
flag as we do already, meanwhile use one smp_mb_acquire() after the loop
to guarantee the memory ordering.
- For pending_sync: it doesn't have any extra data attached, and since
p->flags is never touched now, it should be safe to not use a memory barrier.
That's different from pending_job.
Provide rich comments for all the lockless operations to state how they are
paired. With that, we can remove the mutex.
[1] https://lore.kernel.org/r/87o7d1jlu5.fsf@suse.de
Suggested-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20240202102857.110210-24-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
---
migration/multifd.h | 2 --
migration/multifd.c | 51 +++++++++++++++++++++++----------------------
2 files changed, 26 insertions(+), 27 deletions(-)
diff --git a/migration/multifd.h b/migration/multifd.h
index 98876ff94a..78a2317263 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -91,8 +91,6 @@ typedef struct {
/* syncs main thread and channels */
QemuSemaphore sem_sync;
- /* this mutex protects the following parameters */
- QemuMutex mutex;
/* is this channel thread running */
bool running;
/* multifd flags for each packet */
diff --git a/migration/multifd.c b/migration/multifd.c
index b317d57d61..fbdb129088 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -501,19 +501,19 @@ static bool multifd_send_pages(void)
}
}
- qemu_mutex_lock(&p->mutex);
- assert(!p->pages->num);
- assert(!p->pages->block);
/*
- * Double check on pending_job==false with the lock. In the future if
- * we can have >1 requester thread, we can replace this with a "goto
- * retry", but that is for later.
+ * Make sure we read p->pending_job before all the rest. Pairs with
+ * qatomic_store_release() in multifd_send_thread().
*/
- assert(qatomic_read(&p->pending_job) == false);
- qatomic_set(&p->pending_job, true);
+ smp_mb_acquire();
+ assert(!p->pages->num);
multifd_send_state->pages = p->pages;
p->pages = pages;
- qemu_mutex_unlock(&p->mutex);
+ /*
+ * Making sure p->pages is setup before marking pending_job=true. Pairs
+ * with the qatomic_load_acquire() in multifd_send_thread().
+ */
+ qatomic_store_release(&p->pending_job, true);
qemu_sem_post(&p->sem);
return true;
@@ -648,7 +648,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
}
multifd_send_channel_destroy(p->c);
p->c = NULL;
- qemu_mutex_destroy(&p->mutex);
qemu_sem_destroy(&p->sem);
qemu_sem_destroy(&p->sem_sync);
g_free(p->name);
@@ -742,14 +741,12 @@ int multifd_send_sync_main(void)
trace_multifd_send_sync_main_signal(p->id);
- qemu_mutex_lock(&p->mutex);
/*
* We should be the only user so far, so not possible to be set by
* others concurrently.
*/
assert(qatomic_read(&p->pending_sync) == false);
qatomic_set(&p->pending_sync, true);
- qemu_mutex_unlock(&p->mutex);
qemu_sem_post(&p->sem);
}
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -796,9 +793,12 @@ static void *multifd_send_thread(void *opaque)
if (multifd_send_should_exit()) {
break;
}
- qemu_mutex_lock(&p->mutex);
- if (qatomic_read(&p->pending_job)) {
+ /*
+ * Read pending_job flag before p->pages. Pairs with the
+ * qatomic_store_release() in multifd_send_pages().
+ */
+ if (qatomic_load_acquire(&p->pending_job)) {
MultiFDPages_t *pages = p->pages;
p->iovs_num = 0;
@@ -806,14 +806,12 @@ static void *multifd_send_thread(void *opaque)
ret = multifd_send_state->ops->send_prepare(p, &local_err);
if (ret != 0) {
- qemu_mutex_unlock(&p->mutex);
break;
}
ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
0, p->write_flags, &local_err);
if (ret != 0) {
- qemu_mutex_unlock(&p->mutex);
break;
}
@@ -822,24 +820,31 @@ static void *multifd_send_thread(void *opaque)
multifd_pages_reset(p->pages);
p->next_packet_size = 0;
- qatomic_set(&p->pending_job, false);
- qemu_mutex_unlock(&p->mutex);
+
+ /*
+ * Making sure p->pages is published before saying "we're
+ * free". Pairs with the smp_mb_acquire() in
+ * multifd_send_pages().
+ */
+ qatomic_store_release(&p->pending_job, false);
} else {
- /* If not a normal job, must be a sync request */
+ /*
+ * If not a normal job, must be a sync request. Note that
+ * pending_sync is a standalone flag (unlike pending_job), so
+ * it doesn't require explicit memory barriers.
+ */
assert(qatomic_read(&p->pending_sync));
p->flags = MULTIFD_FLAG_SYNC;
multifd_send_fill_packet(p);
ret = qio_channel_write_all(p->c, (void *)p->packet,
p->packet_len, &local_err);
if (ret != 0) {
- qemu_mutex_unlock(&p->mutex);
break;
}
/* p->next_packet_size will always be zero for a SYNC packet */
stat64_add(&mig_stats.multifd_bytes, p->packet_len);
p->flags = 0;
qatomic_set(&p->pending_sync, false);
- qemu_mutex_unlock(&p->mutex);
qemu_sem_post(&p->sem_sync);
}
}
@@ -853,10 +858,7 @@ out:
error_free(local_err);
}
- qemu_mutex_lock(&p->mutex);
p->running = false;
- qemu_mutex_unlock(&p->mutex);
-
rcu_unregister_thread();
migration_threads_remove(thread);
trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages);
@@ -998,7 +1000,6 @@ int multifd_send_setup(Error **errp)
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
- qemu_mutex_init(&p->mutex);
qemu_sem_init(&p->sem, 0);
qemu_sem_init(&p->sem_sync, 0);
p->id = i;
--
2.43.0
--
Peter Xu
Peter Xu <peterx@redhat.com> writes:
> On Mon, Feb 05, 2024 at 11:10:34AM -0300, Fabiano Rosas wrote:
>> > (maybe I can repost this single patch in-place to avoid another round of
>> > mail bombs..)
>>
>> Sure.
>
> I've got the final version attached here. Feel free to have a look, thanks.
>
> ====
> From 6ba337320430feae4ce9d3d906ea19f68430642d Mon Sep 17 00:00:00 2001
> From: Peter Xu <peterx@redhat.com>
> Date: Fri, 2 Feb 2024 18:28:57 +0800
> Subject: [PATCH] migration/multifd: Optimize sender side to be lockless
>
> When reviewing my attempt to refactor send_prepare(), Fabiano suggested we
> try dropping the mutex in the multifd code [1].
>
> I thought about that before but I never tried to change the code. Now
> maybe it's time to give it a stab. This only optimizes the sender side.
>
> The trick here is multifd has a clear provider/consumer model, where the
> migration main thread publishes requests (either pending_job/pending_sync),
> while the multifd sender threads are consumers. Here we don't have a lot
> of complicated data sharing, and the jobs can logically be submitted
> lockless.
>
> Arm the code with atomic weapons. Two things worth mentioning:
>
> - For multifd_send_pages(): we can use qatomic_load_acquire() when trying
> to find a free channel, but that's expensive if we attach one ACQUIRE per
> channel. Instead, keep the qatomic_read() on reading the pending_job
> flag as we do already, meanwhile use one smp_mb_acquire() after the loop
> to guarantee the memory ordering.
>
> - For pending_sync: it doesn't have any extra data attached, and since
> p->flags is never touched now, it should be safe to not use a memory barrier.
> That's different from pending_job.
>
> Provide rich comments for all the lockless operations to state how they are
> paired. With that, we can remove the mutex.
>
> [1] https://lore.kernel.org/r/87o7d1jlu5.fsf@suse.de
>
> Suggested-by: Fabiano Rosas <farosas@suse.de>
> Link: https://lore.kernel.org/r/20240202102857.110210-24-peterx@redhat.com
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> migration/multifd.h | 2 --
> migration/multifd.c | 51 +++++++++++++++++++++++----------------------
> 2 files changed, 26 insertions(+), 27 deletions(-)
>
> diff --git a/migration/multifd.h b/migration/multifd.h
> index 98876ff94a..78a2317263 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -91,8 +91,6 @@ typedef struct {
> /* syncs main thread and channels */
> QemuSemaphore sem_sync;
>
> - /* this mutex protects the following parameters */
> - QemuMutex mutex;
> /* is this channel thread running */
> bool running;
> /* multifd flags for each packet */
> diff --git a/migration/multifd.c b/migration/multifd.c
> index b317d57d61..fbdb129088 100644
> --- a/migration/multifd.c
> +++ b/migration/multifd.c
> @@ -501,19 +501,19 @@ static bool multifd_send_pages(void)
> }
> }
>
> - qemu_mutex_lock(&p->mutex);
> - assert(!p->pages->num);
> - assert(!p->pages->block);
> /*
> - * Double check on pending_job==false with the lock. In the future if
> - * we can have >1 requester thread, we can replace this with a "goto
> - * retry", but that is for later.
> + * Make sure we read p->pending_job before all the rest. Pairs with
> + * qatomic_store_release() in multifd_send_thread().
> */
> - assert(qatomic_read(&p->pending_job) == false);
> - qatomic_set(&p->pending_job, true);
> + smp_mb_acquire();
> + assert(!p->pages->num);
> multifd_send_state->pages = p->pages;
> p->pages = pages;
> - qemu_mutex_unlock(&p->mutex);
> + /*
> + * Making sure p->pages is setup before marking pending_job=true. Pairs
> + * with the qatomic_load_acquire() in multifd_send_thread().
> + */
> + qatomic_store_release(&p->pending_job, true);
> qemu_sem_post(&p->sem);
>
> return true;
> @@ -648,7 +648,6 @@ static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
> }
> multifd_send_channel_destroy(p->c);
> p->c = NULL;
> - qemu_mutex_destroy(&p->mutex);
> qemu_sem_destroy(&p->sem);
> qemu_sem_destroy(&p->sem_sync);
> g_free(p->name);
> @@ -742,14 +741,12 @@ int multifd_send_sync_main(void)
>
> trace_multifd_send_sync_main_signal(p->id);
>
> - qemu_mutex_lock(&p->mutex);
> /*
> * We should be the only user so far, so not possible to be set by
> * others concurrently.
> */
> assert(qatomic_read(&p->pending_sync) == false);
> qatomic_set(&p->pending_sync, true);
> - qemu_mutex_unlock(&p->mutex);
> qemu_sem_post(&p->sem);
> }
> for (i = 0; i < migrate_multifd_channels(); i++) {
> @@ -796,9 +793,12 @@ static void *multifd_send_thread(void *opaque)
> if (multifd_send_should_exit()) {
> break;
> }
> - qemu_mutex_lock(&p->mutex);
>
> - if (qatomic_read(&p->pending_job)) {
> + /*
> + * Read pending_job flag before p->pages. Pairs with the
> + * qatomic_store_release() in multifd_send_pages().
> + */
> + if (qatomic_load_acquire(&p->pending_job)) {
> MultiFDPages_t *pages = p->pages;
>
> p->iovs_num = 0;
> @@ -806,14 +806,12 @@ static void *multifd_send_thread(void *opaque)
>
> ret = multifd_send_state->ops->send_prepare(p, &local_err);
> if (ret != 0) {
> - qemu_mutex_unlock(&p->mutex);
> break;
> }
>
> ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
> 0, p->write_flags, &local_err);
> if (ret != 0) {
> - qemu_mutex_unlock(&p->mutex);
> break;
> }
>
> @@ -822,24 +820,31 @@ static void *multifd_send_thread(void *opaque)
>
> multifd_pages_reset(p->pages);
> p->next_packet_size = 0;
> - qatomic_set(&p->pending_job, false);
> - qemu_mutex_unlock(&p->mutex);
> +
> + /*
> + * Making sure p->pages is published before saying "we're
> + * free". Pairs with the smp_mb_acquire() in
> + * multifd_send_pages().
> + */
> + qatomic_store_release(&p->pending_job, false);
> } else {
> - /* If not a normal job, must be a sync request */
> + /*
> + * If not a normal job, must be a sync request. Note that
> + * pending_sync is a standalone flag (unlike pending_job), so
> + * it doesn't require explicit memory barriers.
> + */
> assert(qatomic_read(&p->pending_sync));
> p->flags = MULTIFD_FLAG_SYNC;
> multifd_send_fill_packet(p);
> ret = qio_channel_write_all(p->c, (void *)p->packet,
> p->packet_len, &local_err);
> if (ret != 0) {
> - qemu_mutex_unlock(&p->mutex);
> break;
> }
> /* p->next_packet_size will always be zero for a SYNC packet */
> stat64_add(&mig_stats.multifd_bytes, p->packet_len);
> p->flags = 0;
> qatomic_set(&p->pending_sync, false);
> - qemu_mutex_unlock(&p->mutex);
> qemu_sem_post(&p->sem_sync);
> }
> }
> @@ -853,10 +858,7 @@ out:
> error_free(local_err);
> }
>
> - qemu_mutex_lock(&p->mutex);
> p->running = false;
> - qemu_mutex_unlock(&p->mutex);
> -
> rcu_unregister_thread();
> migration_threads_remove(thread);
> trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages);
> @@ -998,7 +1000,6 @@ int multifd_send_setup(Error **errp)
> for (i = 0; i < thread_count; i++) {
> MultiFDSendParams *p = &multifd_send_state->params[i];
>
> - qemu_mutex_init(&p->mutex);
> qemu_sem_init(&p->sem, 0);
> qemu_sem_init(&p->sem_sync, 0);
> p->id = i;
> --
> 2.43.0
Reviewed-by: Fabiano Rosas <farosas@suse.de>