qemu-thread used to abstract pthread primitives into futex for the
QemuEvent implementation of POSIX systems other than Linux. However,
this abstraction has one key difference: unlike futex, pthread
primitives require an explicit destruction, and it must be ordered after
wait and wake operations.
It would be easier to perform destruction if a wait operation ensured
that the corresponding wake operation has finished, as a POSIX semaphore
does, but that requires protecting the state accesses in
qemu_event_set() and qemu_event_wait() with a mutex. A real futex, on
the other hand, does not need such protection, but instead needs complex
barrier and atomic operations before wait and wake operations.
Add special implementations of qemu_event_set() and qemu_event_wait()
using pthread primitives. qemu_event_wait() will ensure qemu_event_set()
finishes, and these functions will avoid complex barrier and atomic
operations.
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
---
util/qemu-thread-posix.c | 45 ++++++++++++++++++---------------------------
1 file changed, 18 insertions(+), 27 deletions(-)
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 13459e44c768..805cac444f15 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -319,28 +319,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
#ifdef __linux__
#include "qemu/futex.h"
-#else
-static inline void qemu_futex_wake(QemuEvent *ev, int n)
-{
- assert(ev->initialized);
- pthread_mutex_lock(&ev->lock);
- if (n == 1) {
- pthread_cond_signal(&ev->cond);
- } else {
- pthread_cond_broadcast(&ev->cond);
- }
- pthread_mutex_unlock(&ev->lock);
-}
-
-static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
-{
- assert(ev->initialized);
- pthread_mutex_lock(&ev->lock);
- if (ev->value == val) {
- pthread_cond_wait(&ev->cond, &ev->lock);
- }
- pthread_mutex_unlock(&ev->lock);
-}
#endif
/* Valid transitions:
@@ -363,7 +341,7 @@ static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
void qemu_event_init(QemuEvent *ev, bool init)
{
-#ifndef __linux__
+#ifndef CONFIG_LINUX
pthread_mutex_init(&ev->lock, NULL);
pthread_cond_init(&ev->cond, NULL);
#endif
@@ -376,7 +354,7 @@ void qemu_event_destroy(QemuEvent *ev)
{
assert(ev->initialized);
ev->initialized = false;
-#ifndef __linux__
+#ifndef CONFIG_LINUX
pthread_mutex_destroy(&ev->lock);
pthread_cond_destroy(&ev->cond);
#endif
@@ -386,6 +364,7 @@ void qemu_event_set(QemuEvent *ev)
{
assert(ev->initialized);
+#ifdef CONFIG_LINUX
/*
* Pairs with both qemu_event_reset() and qemu_event_wait().
*
@@ -403,6 +382,12 @@ void qemu_event_set(QemuEvent *ev)
qemu_futex_wake_all(ev);
}
}
+#else
+ pthread_mutex_lock(&ev->lock);
+ qatomic_set(&ev->value, EV_SET);
+ pthread_cond_broadcast(&ev->cond);
+ pthread_mutex_unlock(&ev->lock);
+#endif
}
void qemu_event_reset(QemuEvent *ev)
@@ -424,10 +409,9 @@ void qemu_event_reset(QemuEvent *ev)
void qemu_event_wait(QemuEvent *ev)
{
- unsigned value;
-
assert(ev->initialized);
+#ifdef CONFIG_LINUX
while (true) {
/*
* qemu_event_wait must synchronize with qemu_event_set even if it does
@@ -438,7 +422,7 @@ void qemu_event_wait(QemuEvent *ev)
* might miss a qemu_event_set() here but ultimately the memory barrier
* in qemu_futex_wait() will ensure the check is done correctly.
*/
- value = qatomic_load_acquire(&ev->value);
+ unsigned value = qatomic_load_acquire(&ev->value);
if (value == EV_SET) {
break;
}
@@ -467,6 +451,13 @@ void qemu_event_wait(QemuEvent *ev)
*/
qemu_futex_wait(ev, EV_BUSY);
}
+#else
+ pthread_mutex_lock(&ev->lock);
+ if (qatomic_read(&ev->value) != EV_SET) {
+ pthread_cond_wait(&ev->cond, &ev->lock);
+ }
+ pthread_mutex_unlock(&ev->lock);
+#endif
}
static __thread NotifierList thread_exit;
--
2.47.1
This fix makes sense. I've also successfully tested with this new "pure"
pthread-based QemuEvent in the apple-gfx code where we initially considered
using QemuEvent and ran into the destruction issue on macOS.
Tested-by: Phil Dennis-Jordan <phil@philjordan.eu>
Reviewed-by: Phil Dennis-Jordan <phil@philjordan.eu>
(Unrelated: email to the devel@daynix.com address in the cc list seems to
bounce.)
On Wed, 25 Dec 2024 at 06:44, Akihiko Odaki <akihiko.odaki@daynix.com>
wrote:
> qemu-thread used to abstract pthread primitives into futex for the
> QemuEvent implementation of POSIX systems other than Linux. However,
> this abstraction has one key difference: unlike futex, pthread
> primitives require an explicit destruction, and it must be ordered after
> wait and wake operations.
>
> It would be easier to perform destruction if a wait operation ensured
> that the corresponding wake operation has finished, as a POSIX semaphore
> does, but that requires protecting the state accesses in
> qemu_event_set() and qemu_event_wait() with a mutex. A real futex, on
> the other hand, does not need such protection, but instead needs complex
> barrier and atomic operations before wait and wake operations.
>
> Add special implementations of qemu_event_set() and qemu_event_wait()
> using pthread primitives. qemu_event_wait() will ensure qemu_event_set()
> finishes, and these functions will avoid complex barrier and atomic
> operations.
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> ---
> util/qemu-thread-posix.c | 45
> ++++++++++++++++++---------------------------
> 1 file changed, 18 insertions(+), 27 deletions(-)
>
> diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
> index 13459e44c768..805cac444f15 100644
> --- a/util/qemu-thread-posix.c
> +++ b/util/qemu-thread-posix.c
> @@ -319,28 +319,6 @@ void qemu_sem_wait(QemuSemaphore *sem)
>
> #ifdef __linux__
> #include "qemu/futex.h"
> -#else
> -static inline void qemu_futex_wake(QemuEvent *ev, int n)
> -{
> - assert(ev->initialized);
> - pthread_mutex_lock(&ev->lock);
> - if (n == 1) {
> - pthread_cond_signal(&ev->cond);
> - } else {
> - pthread_cond_broadcast(&ev->cond);
> - }
> - pthread_mutex_unlock(&ev->lock);
> -}
> -
> -static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
> -{
> - assert(ev->initialized);
> - pthread_mutex_lock(&ev->lock);
> - if (ev->value == val) {
> - pthread_cond_wait(&ev->cond, &ev->lock);
> - }
> - pthread_mutex_unlock(&ev->lock);
> -}
> #endif
>
> /* Valid transitions:
> @@ -363,7 +341,7 @@ static inline void qemu_futex_wait(QemuEvent *ev,
> unsigned val)
>
> void qemu_event_init(QemuEvent *ev, bool init)
> {
> -#ifndef __linux__
> +#ifndef CONFIG_LINUX
> pthread_mutex_init(&ev->lock, NULL);
> pthread_cond_init(&ev->cond, NULL);
> #endif
> @@ -376,7 +354,7 @@ void qemu_event_destroy(QemuEvent *ev)
> {
> assert(ev->initialized);
> ev->initialized = false;
> -#ifndef __linux__
> +#ifndef CONFIG_LINUX
> pthread_mutex_destroy(&ev->lock);
> pthread_cond_destroy(&ev->cond);
> #endif
> @@ -386,6 +364,7 @@ void qemu_event_set(QemuEvent *ev)
> {
> assert(ev->initialized);
>
> +#ifdef CONFIG_LINUX
> /*
> * Pairs with both qemu_event_reset() and qemu_event_wait().
> *
> @@ -403,6 +382,12 @@ void qemu_event_set(QemuEvent *ev)
> qemu_futex_wake_all(ev);
> }
> }
> +#else
> + pthread_mutex_lock(&ev->lock);
> + qatomic_set(&ev->value, EV_SET);
> + pthread_cond_broadcast(&ev->cond);
> + pthread_mutex_unlock(&ev->lock);
> +#endif
> }
>
> void qemu_event_reset(QemuEvent *ev)
> @@ -424,10 +409,9 @@ void qemu_event_reset(QemuEvent *ev)
>
> void qemu_event_wait(QemuEvent *ev)
> {
> - unsigned value;
> -
> assert(ev->initialized);
>
> +#ifdef CONFIG_LINUX
> while (true) {
> /*
> * qemu_event_wait must synchronize with qemu_event_set even if
> it does
> @@ -438,7 +422,7 @@ void qemu_event_wait(QemuEvent *ev)
> * might miss a qemu_event_set() here but ultimately the memory
> barrier
> * in qemu_futex_wait() will ensure the check is done correctly.
> */
> - value = qatomic_load_acquire(&ev->value);
> + unsigned value = qatomic_load_acquire(&ev->value);
> if (value == EV_SET) {
> break;
> }
> @@ -467,6 +451,13 @@ void qemu_event_wait(QemuEvent *ev)
> */
> qemu_futex_wait(ev, EV_BUSY);
> }
> +#else
> + pthread_mutex_lock(&ev->lock);
> + if (qatomic_read(&ev->value) != EV_SET) {
> + pthread_cond_wait(&ev->cond, &ev->lock);
> + }
> + pthread_mutex_unlock(&ev->lock);
> +#endif
> }
>
> static __thread NotifierList thread_exit;
>
> --
> 2.47.1
>
>
© 2016 - 2026 Red Hat, Inc.