qatomic_mb_read and qatomic_mb_set were the very first atomic primitives
introduced for QEMU; their semantics are unclear and they provide a false
sense of safety.
The last use of qatomic_mb_read() has been removed, so delete it.
qatomic_mb_set() instead can survive as an optimized
qatomic_set()+smp_mb(), similar to Linux's smp_store_mb(), but
rename it to qatomic_set_mb() to match the order of the two
operations.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
accel/tcg/cpu-exec.c | 2 +-
accel/tcg/tcg-accel-ops-mttcg.c | 2 +-
accel/tcg/tcg-accel-ops-rr.c | 4 ++--
docs/devel/atomics.rst | 27 ++++-----------------------
include/qemu/atomic.h | 4 ++--
monitor/qmp.c | 2 +-
softmmu/cpus.c | 2 +-
softmmu/physmem.c | 2 +-
target/arm/hvf/hvf.c | 2 +-
tests/unit/test-aio-multithread.c | 2 +-
util/qemu-coroutine-lock.c | 4 ++--
11 files changed, 17 insertions(+), 36 deletions(-)
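
Note for reviewers: below is a minimal sketch of the semantics the new name is
meant to convey, written against C11 atomics rather than qemu/atomic.h so it
compiles on its own.  The set_mb_bool() wrapper and the flag/other variables
are illustrative only and are not part of this patch; the real macro (with an
xchg-based fast path on x86/s390x) lives in include/qemu/atomic.h.

    /* Illustrative approximation of qatomic_set_mb() using plain C11. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic bool flag;
    static _Atomic int other;

    /*
     * Roughly what the generic fallback in this patch expands to:
     * a store followed by a full memory barrier, as in Linux's
     * smp_store_mb().
     */
    static inline void set_mb_bool(_Atomic bool *p, bool v)
    {
        atomic_store_explicit(p, v, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() */
    }

    int main(void)
    {
        /*
         * Typical caller pattern touched by this patch (e.g.
         * cpu_handle_interrupt): clear a flag, then read another
         * variable; the barrier keeps the subsequent load from being
         * reordered before the store.
         */
        set_mb_bool(&flag, false);
        int seen = atomic_load_explicit(&other, memory_order_relaxed);
        printf("flag cleared, other=%d\n", seen);
        return 0;
    }

The point of the rename is visible in the sketch: the store happens first and
the barrier second, so "set_mb" reads in program order, whereas the old
"mb_set" name suggested the opposite.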
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 0e741960dab3..23812f78f2a3 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -779,7 +779,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
* Ensure zeroing happens before reading cpu->exit_request or
* cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
- qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+ qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index d50239e0e28f..0b342b803b59 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
}
}
- qatomic_mb_set(&cpu->exit_request, 0);
+ qatomic_set_mb(&cpu->exit_request, 0);
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index b6d10fa9a250..a27e066130a2 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -244,7 +244,7 @@ static void *rr_cpu_thread_fn(void *arg)
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
/* Store rr_current_cpu before evaluating cpu_can_run(). */
- qatomic_mb_set(&rr_current_cpu, cpu);
+ qatomic_set_mb(&rr_current_cpu, cpu);
current_cpu = cpu;
@@ -287,7 +287,7 @@ static void *rr_cpu_thread_fn(void *arg)
qatomic_set(&rr_current_cpu, NULL);
if (cpu && cpu->exit_request) {
- qatomic_mb_set(&cpu->exit_request, 0);
+ qatomic_set_mb(&cpu->exit_request, 0);
}
if (icount_enabled() && all_cpu_threads_idle()) {
diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst
index 248076375bfe..ff9b5ee30c88 100644
--- a/docs/devel/atomics.rst
+++ b/docs/devel/atomics.rst
@@ -102,28 +102,10 @@ Similar operations return the new value of ``*ptr``::
typeof(*ptr) qatomic_or_fetch(ptr, val)
typeof(*ptr) qatomic_xor_fetch(ptr, val)
-``qemu/atomic.h`` also provides loads and stores that cannot be reordered
-with each other::
+``qemu/atomic.h`` also provides an optimized shortcut for
+``qatomic_set`` followed by ``smp_mb``::
- typeof(*ptr) qatomic_mb_read(ptr)
- void qatomic_mb_set(ptr, val)
-
-However these do not provide sequential consistency and, in particular,
-they do not participate in the total ordering enforced by
-sequentially-consistent operations. For this reason they are deprecated.
-They should instead be replaced with any of the following (ordered from
-easiest to hardest):
-
-- accesses inside a mutex or spinlock
-
-- lightweight synchronization primitives such as ``QemuEvent``
-
-- RCU operations (``qatomic_rcu_read``, ``qatomic_rcu_set``) when publishing
- or accessing a new version of a data structure
-
-- other atomic accesses: ``qatomic_read`` and ``qatomic_load_acquire`` for
- loads, ``qatomic_set`` and ``qatomic_store_release`` for stores, ``smp_mb``
- to forbid reordering subsequent loads before a store.
+ void qatomic_set_mb(ptr, val)
Weak atomic access and manual memory barriers
@@ -523,8 +505,7 @@ and memory barriers, and the equivalents in QEMU:
| :: |
| |
| a = qatomic_read(&x); |
- | qatomic_set(&x, a + 2); |
- | smp_mb(); |
+ | qatomic_set_mb(&x, a + 2); |
| b = qatomic_read(&y); |
+--------------------------------+
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index f85834ee8b20..dce552941b37 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -273,10 +273,10 @@
#if !defined(QEMU_SANITIZE_THREAD) && \
(defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
/* This is more efficient than a store plus a fence. */
-# define qatomic_mb_set(ptr, i) \
+# define qatomic_set_mb(ptr, i) \
({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
#else
-# define qatomic_mb_set(ptr, i) \
+# define qatomic_set_mb(ptr, i) \
({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
diff --git a/monitor/qmp.c b/monitor/qmp.c
index 8b465d80fb1a..2c7359c6cd3c 100644
--- a/monitor/qmp.c
+++ b/monitor/qmp.c
@@ -246,7 +246,7 @@ static QMPRequest *monitor_qmp_dispatcher_pop_any(void)
*
* Clear qmp_dispatcher_co_busy before reading request.
*/
- qatomic_mb_set(&qmp_dispatcher_co_busy, false);
+ qatomic_set_mb(&qmp_dispatcher_co_busy, false);
WITH_QEMU_LOCK_GUARD(&monitor_lock) {
QMPRequest *req_obj;
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
index 9cbc8172b5f2..fed20ffb5dd2 100644
--- a/softmmu/cpus.c
+++ b/softmmu/cpus.c
@@ -405,7 +405,7 @@ static void qemu_cpu_stop(CPUState *cpu, bool exit)
void qemu_wait_io_event_common(CPUState *cpu)
{
- qatomic_mb_set(&cpu->thread_kicked, false);
+ qatomic_set_mb(&cpu->thread_kicked, false);
if (cpu->stop) {
qemu_cpu_stop(cpu, false);
}
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 9d7e172260f1..588d0d166b91 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3132,7 +3132,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
bounce.buffer = NULL;
memory_region_unref(bounce.mr);
/* Clear in_use before reading map_client_list. */
- qatomic_mb_set(&bounce.in_use, false);
+ qatomic_set_mb(&bounce.in_use, false);
cpu_notify_map_clients();
}
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index ad65603445e5..5900dc788f4c 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -1229,7 +1229,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
* Use pselect to sleep so that other threads can IPI us while we're
* sleeping.
*/
- qatomic_mb_set(&cpu->thread_kicked, false);
+ qatomic_set_mb(&cpu->thread_kicked, false);
qemu_mutex_unlock_iothread();
pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
qemu_mutex_lock_iothread();
diff --git a/tests/unit/test-aio-multithread.c b/tests/unit/test-aio-multithread.c
index 80c5d4e2e6e3..08d4570ccb14 100644
--- a/tests/unit/test-aio-multithread.c
+++ b/tests/unit/test-aio-multithread.c
@@ -154,7 +154,7 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
n = g_test_rand_int_range(0, NUM_CONTEXTS);
schedule_next(n);
- qatomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+ qatomic_set_mb(&to_schedule[id], qemu_coroutine_self());
/* finish_cb can run here. */
qemu_coroutine_yield();
g_assert(to_schedule[id] == NULL);
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 84a50a9e9117..2534435388f3 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -202,7 +202,7 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
push_waiter(mutex, &w);
/*
- * Add waiter before reading mutex->handoff. Pairs with qatomic_mb_set
+ * Add waiter before reading mutex->handoff. Pairs with qatomic_set_mb
* in qemu_co_mutex_unlock.
*/
smp_mb__after_rmw();
@@ -310,7 +310,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
our_handoff = mutex->sequence;
/* Set handoff before checking for waiters. */
- qatomic_mb_set(&mutex->handoff, our_handoff);
+ qatomic_set_mb(&mutex->handoff, our_handoff);
if (!has_waiters(mutex)) {
/* The concurrent lock has not added itself yet, so it
* will be able to pick our handoff.
--
2.40.1
On 5/26/23 01:18, Paolo Bonzini wrote:
> qatomic_mb_read and qatomic_mb_set were the very first atomic primitives
> introduced for QEMU; their semantics are unclear and they provide a false
> sense of safety.
>
> The last use of qatomic_mb_read() has been removed, so delete it.
> qatomic_mb_set() instead can survive as an optimized
> qatomic_set()+smp_mb(), similar to Linux's smp_store_mb(), but
> rename it to qatomic_set_mb() to match the order of the two
> operations.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  accel/tcg/cpu-exec.c              |  2 +-
>  accel/tcg/tcg-accel-ops-mttcg.c   |  2 +-
>  accel/tcg/tcg-accel-ops-rr.c      |  4 ++--
>  docs/devel/atomics.rst            | 27 ++++-----------------------
>  include/qemu/atomic.h             |  4 ++--
>  monitor/qmp.c                     |  2 +-
>  softmmu/cpus.c                    |  2 +-
>  softmmu/physmem.c                 |  2 +-
>  target/arm/hvf/hvf.c              |  2 +-
>  tests/unit/test-aio-multithread.c |  2 +-
>  util/qemu-coroutine-lock.c        |  4 ++--
>  11 files changed, 17 insertions(+), 36 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~