1 | The following changes since commit 928173659d6e5dc368284f73f90ea1d129e1f57d: | 1 | The following changes since commit 0fc0142828b5bc965790a1c5c6e241897d3387cb: |
---|---|---|---|
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200130' into staging (2020-01-30 16:19:04 +0000) | 3 | Merge remote-tracking branch 'remotes/kraxel/tags/input-20200921-pull-request' into staging (2020-09-22 21:11:10 +0100) |
4 | 4 | ||
5 | are available in the Git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | https://github.com/stefanha/qemu.git tags/block-pull-request | 7 | https://github.com/stefanha/qemu.git tags/block-pull-request |
8 | 8 | ||
9 | for you to fetch changes up to 8dff69b9415b4287e900358744b732195e1ab2e2: | 9 | for you to fetch changes up to d73415a315471ac0b127ed3fad45c8ec5d711de1: |
10 | 10 | ||
11 | tests/qemu-iotests: use AIOMODE with various tests (2020-01-30 21:01:40 +0000) | 11 | qemu/atomic.h: rename atomic_ to qatomic_ (2020-09-23 16:07:44 +0100) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Pull request | 14 | Pull request |
15 | 15 | ||
16 | This includes the atomic_ -> qatomic_ rename that touches many files and is | ||
17 | prone to conflicts. | ||
18 | |||
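To make the scope of that rename concrete, here is a minimal sketch (not an excerpt from the series) of how a call site changes; it assumes only the qemu/atomic.h helpers:

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"

    static int in_flight;

    static void bump(void)
    {
        /* old spelling, removed by this series: atomic_inc(&in_flight); */
        qatomic_inc(&in_flight);
        (void)qatomic_read(&in_flight);
    }
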
16 | ---------------------------------------------------------------- | 19 | ---------------------------------------------------------------- |
17 | 20 | ||
18 | Aarushi Mehta (15): | 21 | Halil Pasic (1): |
19 | configure: permit use of io_uring | 22 | virtio: add vhost-user-fs-ccw device |
20 | qapi/block-core: add option for io_uring | ||
21 | block/block: add BDRV flag for io_uring | ||
22 | block/io_uring: implements interfaces for io_uring | ||
23 | stubs: add stubs for io_uring interface | ||
24 | util/async: add aio interfaces for io_uring | ||
25 | blockdev: adds bdrv_parse_aio to use io_uring | ||
26 | block/file-posix.c: extend to use io_uring | ||
27 | block: add trace events for io_uring | ||
28 | block/io_uring: adds userspace completion polling | ||
29 | qemu-io: adds option to use aio engine | ||
30 | qemu-img: adds option to use aio engine for benchmarking | ||
31 | qemu-nbd: adds option for aio engines | ||
32 | tests/qemu-iotests: enable testing with aio options | ||
33 | tests/qemu-iotests: use AIOMODE with various tests | ||
34 | 23 | ||
35 | Paolo Bonzini (3): | 24 | Marc Hartmayer (1): |
36 | block: eliminate BDRV_REQ_NO_SERIALISING | 25 | libvhost-user: handle endianness as mandated by the spec |
37 | block/io: wait for serialising requests when a request becomes | ||
38 | serialising | ||
39 | block/io: take bs->reqs_lock in bdrv_mark_request_serialising | ||
40 | 26 | ||
41 | MAINTAINERS | 9 + | 27 | Stefan Hajnoczi (11): |
42 | block.c | 22 ++ | 28 | MAINTAINERS: add Stefan Hajnoczi as block/nvme.c maintainer |
43 | block/Makefile.objs | 3 + | 29 | util/iov: add iov_discard_undo() |
44 | block/file-posix.c | 99 ++++++-- | 30 | virtio-blk: undo destructive iov_discard_*() operations |
45 | block/io.c | 162 +++++++------ | 31 | virtio-crypto: don't modify elem->in/out_sg |
46 | block/io_uring.c | 433 ++++++++++++++++++++++++++++++++++ | 32 | docs/system: clarify deprecation schedule |
47 | block/trace-events | 12 + | 33 | gitmodules: switch to qemu.org qboot mirror |
48 | blockdev.c | 12 +- | 34 | gitmodules: switch to qemu.org meson mirror |
49 | configure | 27 +++ | 35 | gitmodules: add qemu.org vbootrom submodule |
50 | docs/interop/qemu-nbd.rst | 4 +- | 36 | fdmon-poll: reset npfd when upgrading to fdmon-epoll |
51 | include/block/aio.h | 16 +- | 37 | tests: add test-fdmon-epoll |
52 | include/block/block.h | 14 +- | 38 | qemu/atomic.h: rename atomic_ to qatomic_ |
53 | include/block/block_int.h | 3 +- | 39 | |
54 | include/block/raw-aio.h | 12 + | 40 | MAINTAINERS | 5 +- |
55 | qapi/block-core.json | 4 +- | 41 | include/qemu/atomic.h | 248 +++++++++--------- |
56 | qemu-img-cmds.hx | 4 +- | 42 | docs/devel/lockcnt.txt | 8 +- |
57 | qemu-img.c | 11 +- | 43 | docs/devel/rcu.txt | 34 +-- |
58 | qemu-img.texi | 5 +- | 44 | accel/tcg/atomic_template.h | 20 +- |
59 | qemu-io.c | 25 +- | 45 | include/block/aio-wait.h | 4 +- |
60 | qemu-nbd.c | 12 +- | 46 | include/block/aio.h | 8 +- |
61 | stubs/Makefile.objs | 1 + | 47 | include/exec/cpu_ldst.h | 2 +- |
62 | stubs/io_uring.c | 32 +++ | 48 | include/exec/exec-all.h | 6 +- |
63 | tests/qemu-iotests/028 | 2 +- | 49 | include/exec/log.h | 6 +- |
64 | tests/qemu-iotests/058 | 2 +- | 50 | include/exec/memory.h | 2 +- |
65 | tests/qemu-iotests/089 | 4 +- | 51 | include/exec/ram_addr.h | 26 +- |
66 | tests/qemu-iotests/091 | 4 +- | 52 | include/exec/ramlist.h | 2 +- |
67 | tests/qemu-iotests/109 | 2 +- | 53 | include/exec/tb-lookup.h | 4 +- |
68 | tests/qemu-iotests/147 | 5 +- | 54 | include/hw/core/cpu.h | 2 +- |
69 | tests/qemu-iotests/181 | 8 +- | 55 | include/hw/virtio/virtio-blk.h | 2 + |
70 | tests/qemu-iotests/183 | 4 +- | 56 | include/qemu/atomic128.h | 6 +- |
71 | tests/qemu-iotests/185 | 10 +- | 57 | include/qemu/bitops.h | 2 +- |
72 | tests/qemu-iotests/200 | 2 +- | 58 | include/qemu/coroutine.h | 2 +- |
73 | tests/qemu-iotests/201 | 8 +- | 59 | include/qemu/iov.h | 23 ++ |
74 | tests/qemu-iotests/check | 15 +- | 60 | include/qemu/log.h | 6 +- |
75 | tests/qemu-iotests/common.rc | 14 ++ | 61 | include/qemu/queue.h | 7 +- |
76 | tests/qemu-iotests/iotests.py | 12 +- | 62 | include/qemu/rcu.h | 10 +- |
77 | util/async.c | 36 +++ | 63 | include/qemu/rcu_queue.h | 100 +++---- |
78 | 37 files changed, 878 insertions(+), 172 deletions(-) | 64 | include/qemu/seqlock.h | 8 +- |
79 | create mode 100644 block/io_uring.c | 65 | include/qemu/stats64.h | 28 +- |
80 | create mode 100644 stubs/io_uring.c | 66 | include/qemu/thread.h | 24 +- |
67 | .../infiniband/hw/vmw_pvrdma/pvrdma_ring.h | 14 +- | ||
68 | linux-user/qemu.h | 2 +- | ||
69 | tcg/i386/tcg-target.h | 2 +- | ||
70 | tcg/s390/tcg-target.h | 2 +- | ||
71 | tcg/tci/tcg-target.h | 2 +- | ||
72 | accel/kvm/kvm-all.c | 12 +- | ||
73 | accel/tcg/cpu-exec.c | 15 +- | ||
74 | accel/tcg/cputlb.c | 24 +- | ||
75 | accel/tcg/tcg-all.c | 2 +- | ||
76 | accel/tcg/translate-all.c | 55 ++-- | ||
77 | audio/jackaudio.c | 18 +- | ||
78 | block.c | 4 +- | ||
79 | block/block-backend.c | 15 +- | ||
80 | block/io.c | 48 ++-- | ||
81 | block/nfs.c | 2 +- | ||
82 | block/sheepdog.c | 2 +- | ||
83 | block/throttle-groups.c | 12 +- | ||
84 | block/throttle.c | 4 +- | ||
85 | blockdev.c | 2 +- | ||
86 | blockjob.c | 2 +- | ||
87 | contrib/libvhost-user/libvhost-user.c | 79 +++--- | ||
88 | cpus-common.c | 26 +- | ||
89 | dump/dump.c | 8 +- | ||
90 | exec.c | 49 ++-- | ||
91 | hw/block/virtio-blk.c | 11 +- | ||
92 | hw/core/cpu.c | 6 +- | ||
93 | hw/display/qxl.c | 4 +- | ||
94 | hw/hyperv/hyperv.c | 10 +- | ||
95 | hw/hyperv/vmbus.c | 2 +- | ||
96 | hw/i386/xen/xen-hvm.c | 2 +- | ||
97 | hw/intc/rx_icu.c | 12 +- | ||
98 | hw/intc/sifive_plic.c | 4 +- | ||
99 | hw/misc/edu.c | 16 +- | ||
100 | hw/net/virtio-net.c | 10 +- | ||
101 | hw/rdma/rdma_backend.c | 18 +- | ||
102 | hw/rdma/rdma_rm.c | 2 +- | ||
103 | hw/rdma/vmw/pvrdma_dev_ring.c | 4 +- | ||
104 | hw/s390x/s390-pci-bus.c | 2 +- | ||
105 | hw/s390x/vhost-user-fs-ccw.c | 75 ++++++ | ||
106 | hw/s390x/virtio-ccw.c | 2 +- | ||
107 | hw/virtio/vhost.c | 2 +- | ||
108 | hw/virtio/virtio-crypto.c | 17 +- | ||
109 | hw/virtio/virtio-mmio.c | 6 +- | ||
110 | hw/virtio/virtio-pci.c | 6 +- | ||
111 | hw/virtio/virtio.c | 16 +- | ||
112 | hw/xtensa/pic_cpu.c | 4 +- | ||
113 | iothread.c | 6 +- | ||
114 | linux-user/hppa/cpu_loop.c | 11 +- | ||
115 | linux-user/signal.c | 8 +- | ||
116 | migration/colo-failover.c | 4 +- | ||
117 | migration/migration.c | 8 +- | ||
118 | migration/multifd.c | 18 +- | ||
119 | migration/postcopy-ram.c | 34 +-- | ||
120 | migration/rdma.c | 34 +-- | ||
121 | monitor/hmp.c | 6 +- | ||
122 | monitor/misc.c | 2 +- | ||
123 | monitor/monitor.c | 6 +- | ||
124 | qemu-nbd.c | 2 +- | ||
125 | qga/commands.c | 12 +- | ||
126 | qom/object.c | 20 +- | ||
127 | scsi/qemu-pr-helper.c | 4 +- | ||
128 | softmmu/cpu-throttle.c | 10 +- | ||
129 | softmmu/cpus.c | 42 +-- | ||
130 | softmmu/memory.c | 6 +- | ||
131 | softmmu/vl.c | 2 +- | ||
132 | target/arm/mte_helper.c | 6 +- | ||
133 | target/hppa/op_helper.c | 2 +- | ||
134 | target/i386/mem_helper.c | 2 +- | ||
135 | target/i386/whpx-all.c | 6 +- | ||
136 | target/riscv/cpu_helper.c | 2 +- | ||
137 | target/s390x/mem_helper.c | 4 +- | ||
138 | target/xtensa/exc_helper.c | 4 +- | ||
139 | target/xtensa/op_helper.c | 2 +- | ||
140 | tcg/tcg.c | 58 ++-- | ||
141 | tcg/tci.c | 2 +- | ||
142 | tests/atomic64-bench.c | 14 +- | ||
143 | tests/atomic_add-bench.c | 14 +- | ||
144 | tests/iothread.c | 2 +- | ||
145 | tests/qht-bench.c | 12 +- | ||
146 | tests/rcutorture.c | 24 +- | ||
147 | tests/test-aio-multithread.c | 52 ++-- | ||
148 | tests/test-fdmon-epoll.c | 73 ++++++ | ||
149 | tests/test-iov.c | 165 ++++++++++++ | ||
150 | tests/test-logging.c | 4 +- | ||
151 | tests/test-rcu-list.c | 38 +-- | ||
152 | tests/test-thread-pool.c | 10 +- | ||
153 | util/aio-posix.c | 14 +- | ||
154 | util/aio-wait.c | 2 +- | ||
155 | util/aio-win32.c | 5 +- | ||
156 | util/async.c | 28 +- | ||
157 | util/atomic64.c | 10 +- | ||
158 | util/bitmap.c | 14 +- | ||
159 | util/cacheinfo.c | 2 +- | ||
160 | util/fdmon-epoll.c | 4 +- | ||
161 | util/fdmon-io_uring.c | 12 +- | ||
162 | util/fdmon-poll.c | 1 + | ||
163 | util/iov.c | 50 +++- | ||
164 | util/lockcnt.c | 52 ++-- | ||
165 | util/log.c | 10 +- | ||
166 | util/qemu-coroutine-lock.c | 18 +- | ||
167 | util/qemu-coroutine-sleep.c | 4 +- | ||
168 | util/qemu-coroutine.c | 6 +- | ||
169 | util/qemu-sockets.c | 4 +- | ||
170 | util/qemu-thread-posix.c | 12 +- | ||
171 | util/qemu-thread-win32.c | 12 +- | ||
172 | util/qemu-timer.c | 12 +- | ||
173 | util/qht.c | 57 ++-- | ||
174 | util/qsp.c | 50 ++-- | ||
175 | util/rcu.c | 36 +-- | ||
176 | util/stats64.c | 34 +-- | ||
177 | .gitmodules | 6 +- | ||
178 | docs/devel/atomics.rst | 134 +++++----- | ||
179 | docs/system/deprecated.rst | 9 +- | ||
180 | hw/s390x/meson.build | 1 + | ||
181 | scripts/kernel-doc | 2 +- | ||
182 | tcg/aarch64/tcg-target.c.inc | 2 +- | ||
183 | tcg/mips/tcg-target.c.inc | 2 +- | ||
184 | tcg/ppc/tcg-target.c.inc | 6 +- | ||
185 | tcg/sparc/tcg-target.c.inc | 5 +- | ||
186 | tests/meson.build | 3 + | ||
187 | 147 files changed, 1508 insertions(+), 1069 deletions(-) | ||
188 | create mode 100644 hw/s390x/vhost-user-fs-ccw.c | ||
189 | create mode 100644 tests/test-fdmon-epoll.c | ||
81 | 190 | ||
82 | -- | 191 | -- |
83 | 2.24.1 | 192 | 2.26.2 |
84 | 193 | ||
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | Development of the userspace NVMe block driver picked up again recently. |
---|---|---|---|
2 | After talking with Fam I am stepping up as block/nvme.c maintainer. | ||
3 | Patches will be merged through my 'block' tree. | ||
2 | 4 | ||
3 | Since io_uring is the actual name of the Linux API, we use it as enum | 5 | Cc: Kevin Wolf <kwolf@redhat.com> |
4 | value even though the QAPI schema conventions would prefer io-uring. | 6 | Cc: Klaus Jensen <k.jensen@samsung.com> |
5 | 7 | Cc: Fam Zheng <fam@euphon.net> | |
6 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
7 | Acked-by: Markus Armbruster <armbru@redhat.com> | ||
8 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
10 | Message-id: 20200120141858.587874-3-stefanha@redhat.com | 9 | Acked-by: Kevin Wolf <kwolf@redhat.com> |
11 | Message-Id: <20200120141858.587874-3-stefanha@redhat.com> | 10 | Acked-by: Klaus Jensen <k.jensen@samsung.com> |
11 | Acked-by: Fam Zheng <fam@euphon.net> | ||
12 | Message-id: 20200907111632.90499-1-stefanha@redhat.com | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
13 | --- | 14 | --- |
14 | qapi/block-core.json | 4 +++- | 15 | MAINTAINERS | 4 +++- |
15 | 1 file changed, 3 insertions(+), 1 deletion(-) | 16 | 1 file changed, 3 insertions(+), 1 deletion(-) |
16 | 17 | ||
17 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 18 | diff --git a/MAINTAINERS b/MAINTAINERS |
18 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/qapi/block-core.json | 20 | --- a/MAINTAINERS |
20 | +++ b/qapi/block-core.json | 21 | +++ b/MAINTAINERS |
21 | @@ -XXX,XX +XXX,XX @@ | 22 | @@ -XXX,XX +XXX,XX @@ S: Supported |
22 | # | 23 | F: block/null.c |
23 | # @threads: Use qemu's thread pool | 24 | |
24 | # @native: Use native AIO backend (only Linux and Windows) | 25 | NVMe Block Driver |
25 | +# @io_uring: Use linux io_uring (since 5.0) | 26 | -M: Fam Zheng <fam@euphon.net> |
26 | # | 27 | +M: Stefan Hajnoczi <stefanha@redhat.com> |
27 | # Since: 2.9 | 28 | +R: Fam Zheng <fam@euphon.net> |
28 | ## | 29 | L: qemu-block@nongnu.org |
29 | { 'enum': 'BlockdevAioOptions', | 30 | S: Supported |
30 | - 'data': [ 'threads', 'native' ] } | 31 | F: block/nvme* |
31 | + 'data': [ 'threads', 'native', | 32 | +T: git https://github.com/stefanha/qemu.git block |
32 | + { 'name': 'io_uring', 'if': 'defined(CONFIG_LINUX_IO_URING)' } ] } | 33 | |
33 | 34 | Bootdevice | |
34 | ## | 35 | M: Gonglei <arei.gonglei@huawei.com> |
35 | # @BlockdevCacheOptions: | ||
36 | -- | 36 | -- |
37 | 2.24.1 | 37 | 2.26.2 |
38 | 38 | ||
1 | From: Paolo Bonzini <pbonzini@redhat.com> | 1 | From: Marc Hartmayer <mhartmay@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | bdrv_mark_request_serialising is writing the overlap_offset and | 3 | Since virtio existed even before it got standardized, the virtio |
4 | overlap_bytes fields of BdrvTrackedRequest. Take bs->reqs_lock | 4 | standard defines the following types of virtio devices: |
5 | for the whole duration of it, and not just when waiting for | 5 | |
6 | serialising requests, so that tracked_request_overlaps does not | 6 | + legacy device (pre-virtio 1.0) |
7 | look at a half-updated request. | 7 | + non-legacy or VIRTIO 1.0 device |
8 | 8 | + transitional device (which can act both as legacy and non-legacy) | |
9 | The new code does not unlock/relock around retries. This is unnecessary | 9 | |
10 | because a retry is always preceded by a CoQueue wait, which already | 10 | Virtio 1.0 defines the fields of the virtqueues as little endian, |
11 | releases and reacquires bs->reqs_lock. | 11 | while legacy uses guest's native endian [1]. Currently libvhost-user |
12 | 12 | does not handle virtio endianness at all, i.e. it works only if the | |
13 | Reported-by: Peter Lieven <pl@kamp.de> | 13 | native endianness matches with whatever is actually needed. That means |
14 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 14 | things break spectacularly on big-endian targets. Let us handle virtio |
15 | Message-id: 1578495356-46219-4-git-send-email-pbonzini@redhat.com | 15 | endianness for non-legacy as required by the virtio specification [1] |
16 | Message-Id: <1578495356-46219-4-git-send-email-pbonzini@redhat.com> | 16 | and fence legacy virtio, as there is no safe way to figure out the |
17 | needed endianness conversions for all cases. The fencing of legacy | ||
18 | virtio devices is done in `vu_set_features_exec`. | ||
19 | |||
20 | [1] https://docs.oasis-open.org/virtio/virtio/v1.1/cs01/virtio-v1.1-cs01.html#x1-210003 | ||
21 | |||
22 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | ||
23 | Signed-off-by: Marc Hartmayer <mhartmay@linux.ibm.com> | ||
24 | Message-id: 20200901150019.29229-3-mhartmay@linux.ibm.com | ||
17 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 25 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
18 | --- | 26 | --- |
19 | block/io.c | 112 ++++++++++++++++++++++++++++++----------------------- | 27 | contrib/libvhost-user/libvhost-user.c | 77 +++++++++++++++------------ |
20 | 1 file changed, 63 insertions(+), 49 deletions(-) | 28 | 1 file changed, 43 insertions(+), 34 deletions(-) |
21 | 29 | ||
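For reference, the libvhost-user fix routes every guest-visible ring access through the fixed-endian helpers from "qemu/bswap.h". A minimal sketch of the pattern (illustrative only, not an excerpt from the patch; used_idx_bump is a made-up helper name):

    #include "qemu/osdep.h"
    #include "qemu/bswap.h"
    #include "standard-headers/linux/virtio_ring.h"

    /* VIRTIO 1.0 ring fields are little-endian on the wire, so convert
     * explicitly instead of relying on the host's byte order. */
    static uint16_t used_idx_bump(struct vring_used *used)
    {
        uint16_t idx = lduw_le_p(&used->idx);  /* LE load on any host */
        stw_le_p(&used->idx, idx + 1);         /* LE store on any host */
        return idx;
    }
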
22 | diff --git a/block/io.c b/block/io.c | 30 | diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c |
23 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/block/io.c | 32 | --- a/contrib/libvhost-user/libvhost-user.c |
25 | +++ b/block/io.c | 33 | +++ b/contrib/libvhost-user/libvhost-user.c |
26 | @@ -XXX,XX +XXX,XX @@ | 34 | @@ -XXX,XX +XXX,XX @@ |
27 | #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) | 35 | |
28 | 36 | #include "qemu/atomic.h" | |
29 | static void bdrv_parent_cb_resize(BlockDriverState *bs); | 37 | #include "qemu/osdep.h" |
30 | -static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); | 38 | +#include "qemu/bswap.h" |
31 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | 39 | #include "qemu/memfd.h" |
32 | int64_t offset, int bytes, BdrvRequestFlags flags); | 40 | |
33 | 41 | #include "libvhost-user.h" | |
34 | @@ -XXX,XX +XXX,XX @@ static void tracked_request_begin(BdrvTrackedRequest *req, | 42 | @@ -XXX,XX +XXX,XX @@ vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg) |
35 | qemu_co_mutex_unlock(&bs->reqs_lock); | 43 | DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64); |
36 | } | 44 | |
37 | 45 | dev->features = vmsg->payload.u64; | |
38 | +static bool tracked_request_overlaps(BdrvTrackedRequest *req, | 46 | + if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) { |
39 | + int64_t offset, uint64_t bytes) | 47 | + /* |
40 | +{ | 48 | + * We only support devices conforming to VIRTIO 1.0 or |
41 | + /* aaaa bbbb */ | 49 | + * later |
42 | + if (offset >= req->overlap_offset + req->overlap_bytes) { | 50 | + */ |
51 | + vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user"); | ||
43 | + return false; | 52 | + return false; |
44 | + } | 53 | + } |
45 | + /* bbbb aaaa */ | 54 | |
46 | + if (req->overlap_offset >= offset + bytes) { | 55 | if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) { |
47 | + return false; | 56 | vu_set_enable_all_rings(dev, true); |
48 | + } | 57 | @@ -XXX,XX +XXX,XX @@ vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg) |
49 | + return true; | ||
50 | +} | ||
51 | + | ||
52 | +static bool coroutine_fn | ||
53 | +bdrv_wait_serialising_requests_locked(BlockDriverState *bs, | ||
54 | + BdrvTrackedRequest *self) | ||
55 | +{ | ||
56 | + BdrvTrackedRequest *req; | ||
57 | + bool retry; | ||
58 | + bool waited = false; | ||
59 | + | ||
60 | + do { | ||
61 | + retry = false; | ||
62 | + QLIST_FOREACH(req, &bs->tracked_requests, list) { | ||
63 | + if (req == self || (!req->serialising && !self->serialising)) { | ||
64 | + continue; | ||
65 | + } | ||
66 | + if (tracked_request_overlaps(req, self->overlap_offset, | ||
67 | + self->overlap_bytes)) | ||
68 | + { | ||
69 | + /* Hitting this means there was a reentrant request, for | ||
70 | + * example, a block driver issuing nested requests. This must | ||
71 | + * never happen since it means deadlock. | ||
72 | + */ | ||
73 | + assert(qemu_coroutine_self() != req->co); | ||
74 | + | ||
75 | + /* If the request is already (indirectly) waiting for us, or | ||
76 | + * will wait for us as soon as it wakes up, then just go on | ||
77 | + * (instead of producing a deadlock in the former case). */ | ||
78 | + if (!req->waiting_for) { | ||
79 | + self->waiting_for = req; | ||
80 | + qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock); | ||
81 | + self->waiting_for = NULL; | ||
82 | + retry = true; | ||
83 | + waited = true; | ||
84 | + break; | ||
85 | + } | ||
86 | + } | ||
87 | + } | ||
88 | + } while (retry); | ||
89 | + return waited; | ||
90 | +} | ||
91 | + | ||
92 | bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
93 | { | ||
94 | + BlockDriverState *bs = req->bs; | ||
95 | int64_t overlap_offset = req->offset & ~(align - 1); | ||
96 | uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) | ||
97 | - overlap_offset; | ||
98 | + bool waited; | ||
99 | |||
100 | + qemu_co_mutex_lock(&bs->reqs_lock); | ||
101 | if (!req->serialising) { | ||
102 | atomic_inc(&req->bs->serialising_in_flight); | ||
103 | req->serialising = true; | ||
104 | @@ -XXX,XX +XXX,XX @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
105 | |||
106 | req->overlap_offset = MIN(req->overlap_offset, overlap_offset); | ||
107 | req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); | ||
108 | - return bdrv_wait_serialising_requests(req); | ||
109 | + waited = bdrv_wait_serialising_requests_locked(bs, req); | ||
110 | + qemu_co_mutex_unlock(&bs->reqs_lock); | ||
111 | + return waited; | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | @@ -XXX,XX +XXX,XX @@ static int bdrv_get_cluster_size(BlockDriverState *bs) | ||
116 | } | ||
117 | } | ||
118 | |||
119 | -static bool tracked_request_overlaps(BdrvTrackedRequest *req, | ||
120 | - int64_t offset, uint64_t bytes) | ||
121 | -{ | ||
122 | - /* aaaa bbbb */ | ||
123 | - if (offset >= req->overlap_offset + req->overlap_bytes) { | ||
124 | - return false; | ||
125 | - } | ||
126 | - /* bbbb aaaa */ | ||
127 | - if (req->overlap_offset >= offset + bytes) { | ||
128 | - return false; | ||
129 | - } | ||
130 | - return true; | ||
131 | -} | ||
132 | - | ||
133 | void bdrv_inc_in_flight(BlockDriverState *bs) | ||
134 | { | ||
135 | atomic_inc(&bs->in_flight); | ||
136 | @@ -XXX,XX +XXX,XX @@ void bdrv_dec_in_flight(BlockDriverState *bs) | ||
137 | static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) | ||
138 | { | ||
139 | BlockDriverState *bs = self->bs; | ||
140 | - BdrvTrackedRequest *req; | ||
141 | - bool retry; | ||
142 | bool waited = false; | ||
143 | |||
144 | if (!atomic_read(&bs->serialising_in_flight)) { | ||
145 | return false; | 58 | return false; |
146 | } | 59 | } |
147 | 60 | ||
148 | - do { | 61 | - vq->used_idx = vq->vring.used->idx; |
149 | - retry = false; | 62 | + vq->used_idx = lduw_le_p(&vq->vring.used->idx); |
150 | - qemu_co_mutex_lock(&bs->reqs_lock); | 63 | |
151 | - QLIST_FOREACH(req, &bs->tracked_requests, list) { | 64 | if (vq->last_avail_idx != vq->used_idx) { |
152 | - if (req == self || (!req->serialising && !self->serialising)) { | 65 | bool resume = dev->iface->queue_is_processed_in_order && |
153 | - continue; | 66 | @@ -XXX,XX +XXX,XX @@ vu_check_queue_inflights(VuDev *dev, VuVirtq *vq) |
154 | - } | 67 | return 0; |
155 | - if (tracked_request_overlaps(req, self->overlap_offset, | 68 | } |
156 | - self->overlap_bytes)) | 69 | |
157 | - { | 70 | - vq->used_idx = vq->vring.used->idx; |
158 | - /* Hitting this means there was a reentrant request, for | 71 | + vq->used_idx = lduw_le_p(&vq->vring.used->idx); |
159 | - * example, a block driver issuing nested requests. This must | 72 | vq->resubmit_num = 0; |
160 | - * never happen since it means deadlock. | 73 | vq->resubmit_list = NULL; |
161 | - */ | 74 | vq->counter = 0; |
162 | - assert(qemu_coroutine_self() != req->co); | 75 | @@ -XXX,XX +XXX,XX @@ vu_queue_started(const VuDev *dev, const VuVirtq *vq) |
163 | - | 76 | static inline uint16_t |
164 | - /* If the request is already (indirectly) waiting for us, or | 77 | vring_avail_flags(VuVirtq *vq) |
165 | - * will wait for us as soon as it wakes up, then just go on | 78 | { |
166 | - * (instead of producing a deadlock in the former case). */ | 79 | - return vq->vring.avail->flags; |
167 | - if (!req->waiting_for) { | 80 | + return lduw_le_p(&vq->vring.avail->flags); |
168 | - self->waiting_for = req; | 81 | } |
169 | - qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock); | 82 | |
170 | - self->waiting_for = NULL; | 83 | static inline uint16_t |
171 | - retry = true; | 84 | vring_avail_idx(VuVirtq *vq) |
172 | - waited = true; | 85 | { |
173 | - break; | 86 | - vq->shadow_avail_idx = vq->vring.avail->idx; |
174 | - } | 87 | + vq->shadow_avail_idx = lduw_le_p(&vq->vring.avail->idx); |
175 | - } | 88 | |
176 | - } | 89 | return vq->shadow_avail_idx; |
177 | - qemu_co_mutex_unlock(&bs->reqs_lock); | 90 | } |
178 | - } while (retry); | 91 | @@ -XXX,XX +XXX,XX @@ vring_avail_idx(VuVirtq *vq) |
179 | + qemu_co_mutex_lock(&bs->reqs_lock); | 92 | static inline uint16_t |
180 | + waited = bdrv_wait_serialising_requests_locked(bs, self); | 93 | vring_avail_ring(VuVirtq *vq, int i) |
181 | + qemu_co_mutex_unlock(&bs->reqs_lock); | 94 | { |
182 | 95 | - return vq->vring.avail->ring[i]; | |
183 | return waited; | 96 | + return lduw_le_p(&vq->vring.avail->ring[i]); |
184 | } | 97 | } |
98 | |||
99 | static inline uint16_t | ||
100 | @@ -XXX,XX +XXX,XX @@ virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, | ||
101 | int i, unsigned int max, unsigned int *next) | ||
102 | { | ||
103 | /* If this descriptor says it doesn't chain, we're done. */ | ||
104 | - if (!(desc[i].flags & VRING_DESC_F_NEXT)) { | ||
105 | + if (!(lduw_le_p(&desc[i].flags) & VRING_DESC_F_NEXT)) { | ||
106 | return VIRTQUEUE_READ_DESC_DONE; | ||
107 | } | ||
108 | |||
109 | /* Check they're not leading us off end of descriptors. */ | ||
110 | - *next = desc[i].next; | ||
111 | + *next = lduw_le_p(&desc[i].next); | ||
112 | /* Make sure compiler knows to grab that: we don't want it changing! */ | ||
113 | smp_wmb(); | ||
114 | |||
115 | @@ -XXX,XX +XXX,XX @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, | ||
116 | } | ||
117 | desc = vq->vring.desc; | ||
118 | |||
119 | - if (desc[i].flags & VRING_DESC_F_INDIRECT) { | ||
120 | - if (desc[i].len % sizeof(struct vring_desc)) { | ||
121 | + if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) { | ||
122 | + if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) { | ||
123 | vu_panic(dev, "Invalid size for indirect buffer table"); | ||
124 | goto err; | ||
125 | } | ||
126 | @@ -XXX,XX +XXX,XX @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, | ||
127 | |||
128 | /* loop over the indirect descriptor table */ | ||
129 | indirect = 1; | ||
130 | - desc_addr = desc[i].addr; | ||
131 | - desc_len = desc[i].len; | ||
132 | + desc_addr = ldq_le_p(&desc[i].addr); | ||
133 | + desc_len = ldl_le_p(&desc[i].len); | ||
134 | max = desc_len / sizeof(struct vring_desc); | ||
135 | read_len = desc_len; | ||
136 | desc = vu_gpa_to_va(dev, &read_len, desc_addr); | ||
137 | @@ -XXX,XX +XXX,XX @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, | ||
138 | goto err; | ||
139 | } | ||
140 | |||
141 | - if (desc[i].flags & VRING_DESC_F_WRITE) { | ||
142 | - in_total += desc[i].len; | ||
143 | + if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) { | ||
144 | + in_total += ldl_le_p(&desc[i].len); | ||
145 | } else { | ||
146 | - out_total += desc[i].len; | ||
147 | + out_total += ldl_le_p(&desc[i].len); | ||
148 | } | ||
149 | if (in_total >= max_in_bytes && out_total >= max_out_bytes) { | ||
150 | goto done; | ||
151 | @@ -XXX,XX +XXX,XX @@ vring_used_flags_set_bit(VuVirtq *vq, int mask) | ||
152 | |||
153 | flags = (uint16_t *)((char*)vq->vring.used + | ||
154 | offsetof(struct vring_used, flags)); | ||
155 | - *flags |= mask; | ||
156 | + stw_le_p(flags, lduw_le_p(flags) | mask); | ||
157 | } | ||
158 | |||
159 | static inline void | ||
160 | @@ -XXX,XX +XXX,XX @@ vring_used_flags_unset_bit(VuVirtq *vq, int mask) | ||
161 | |||
162 | flags = (uint16_t *)((char*)vq->vring.used + | ||
163 | offsetof(struct vring_used, flags)); | ||
164 | - *flags &= ~mask; | ||
165 | + stw_le_p(flags, lduw_le_p(flags) & ~mask); | ||
166 | } | ||
167 | |||
168 | static inline void | ||
169 | @@ -XXX,XX +XXX,XX @@ vring_set_avail_event(VuVirtq *vq, uint16_t val) | ||
170 | return; | ||
171 | } | ||
172 | |||
173 | - *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val; | ||
174 | + stw_le_p(&vq->vring.used->ring[vq->vring.num], val); | ||
175 | } | ||
176 | |||
177 | void | ||
178 | @@ -XXX,XX +XXX,XX @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) | ||
179 | struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; | ||
180 | int rc; | ||
181 | |||
182 | - if (desc[i].flags & VRING_DESC_F_INDIRECT) { | ||
183 | - if (desc[i].len % sizeof(struct vring_desc)) { | ||
184 | + if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) { | ||
185 | + if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) { | ||
186 | vu_panic(dev, "Invalid size for indirect buffer table"); | ||
187 | } | ||
188 | |||
189 | /* loop over the indirect descriptor table */ | ||
190 | - desc_addr = desc[i].addr; | ||
191 | - desc_len = desc[i].len; | ||
192 | + desc_addr = ldq_le_p(&desc[i].addr); | ||
193 | + desc_len = ldl_le_p(&desc[i].len); | ||
194 | max = desc_len / sizeof(struct vring_desc); | ||
195 | read_len = desc_len; | ||
196 | desc = vu_gpa_to_va(dev, &read_len, desc_addr); | ||
197 | @@ -XXX,XX +XXX,XX @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) | ||
198 | |||
199 | /* Collect all the descriptors */ | ||
200 | do { | ||
201 | - if (desc[i].flags & VRING_DESC_F_WRITE) { | ||
202 | + if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) { | ||
203 | virtqueue_map_desc(dev, &in_num, iov + out_num, | ||
204 | VIRTQUEUE_MAX_SIZE - out_num, true, | ||
205 | - desc[i].addr, desc[i].len); | ||
206 | + ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len)); | ||
207 | } else { | ||
208 | if (in_num) { | ||
209 | vu_panic(dev, "Incorrect order for descriptors"); | ||
210 | @@ -XXX,XX +XXX,XX @@ vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) | ||
211 | } | ||
212 | virtqueue_map_desc(dev, &out_num, iov, | ||
213 | VIRTQUEUE_MAX_SIZE, false, | ||
214 | - desc[i].addr, desc[i].len); | ||
215 | + ldq_le_p(&desc[i].addr), ldl_le_p(&desc[i].len)); | ||
216 | } | ||
217 | |||
218 | /* If we've got too many, that implies a descriptor loop. */ | ||
219 | @@ -XXX,XX +XXX,XX @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq, | ||
220 | max = vq->vring.num; | ||
221 | i = elem->index; | ||
222 | |||
223 | - if (desc[i].flags & VRING_DESC_F_INDIRECT) { | ||
224 | - if (desc[i].len % sizeof(struct vring_desc)) { | ||
225 | + if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_INDIRECT) { | ||
226 | + if (ldl_le_p(&desc[i].len) % sizeof(struct vring_desc)) { | ||
227 | vu_panic(dev, "Invalid size for indirect buffer table"); | ||
228 | } | ||
229 | |||
230 | /* loop over the indirect descriptor table */ | ||
231 | - desc_addr = desc[i].addr; | ||
232 | - desc_len = desc[i].len; | ||
233 | + desc_addr = ldq_le_p(&desc[i].addr); | ||
234 | + desc_len = ldl_le_p(&desc[i].len); | ||
235 | max = desc_len / sizeof(struct vring_desc); | ||
236 | read_len = desc_len; | ||
237 | desc = vu_gpa_to_va(dev, &read_len, desc_addr); | ||
238 | @@ -XXX,XX +XXX,XX @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq, | ||
239 | return; | ||
240 | } | ||
241 | |||
242 | - if (desc[i].flags & VRING_DESC_F_WRITE) { | ||
243 | - min = MIN(desc[i].len, len); | ||
244 | - vu_log_write(dev, desc[i].addr, min); | ||
245 | + if (lduw_le_p(&desc[i].flags) & VRING_DESC_F_WRITE) { | ||
246 | + min = MIN(ldl_le_p(&desc[i].len), len); | ||
247 | + vu_log_write(dev, ldq_le_p(&desc[i].addr), min); | ||
248 | len -= min; | ||
249 | } | ||
250 | |||
251 | @@ -XXX,XX +XXX,XX @@ vu_queue_fill(VuDev *dev, VuVirtq *vq, | ||
252 | |||
253 | idx = (idx + vq->used_idx) % vq->vring.num; | ||
254 | |||
255 | - uelem.id = elem->index; | ||
256 | - uelem.len = len; | ||
257 | + stl_le_p(&uelem.id, elem->index); | ||
258 | + stl_le_p(&uelem.len, len); | ||
259 | vring_used_write(dev, vq, &uelem, idx); | ||
260 | } | ||
261 | |||
262 | static inline | ||
263 | void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) | ||
264 | { | ||
265 | - vq->vring.used->idx = val; | ||
266 | + stw_le_p(&vq->vring.used->idx, val); | ||
267 | vu_log_write(dev, | ||
268 | vq->vring.log_guest_addr + offsetof(struct vring_used, idx), | ||
269 | sizeof(vq->vring.used->idx)); | ||
185 | -- | 270 | -- |
186 | 2.24.1 | 271 | 2.26.2 |
187 | 272 | ||
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | From: Halil Pasic <pasic@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | Follow linux-aio.o and stub out the block/io_uring.o APIs that will be | 3 | Wire up the CCW device for vhost-user-fs. |
4 | missing when a binary is linked with obj-util-y but without | ||
5 | block-util-y (e.g. vhost-user-gpu). | ||
6 | 4 | ||
7 | For example, the stubs are necessary so that a binary using util/async.o | 5 | Reviewed-by: Cornelia Huck <cohuck@redhat.com> |
8 | from obj-util-y for qemu_bh_new() links successfully. In this case | 6 | Signed-off-by: Halil Pasic <pasic@linux.ibm.com> |
9 | block/io_uring.o from block-util-y isn't needed and we can avoid | 7 | Message-id: 20200901150019.29229-2-mhartmay@linux.ibm.com |
10 | dragging in the block layer by linking the stubs instead. The stub | ||
11 | functions never get called. | ||
12 | |||
13 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
14 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
16 | Message-id: 20200120141858.587874-6-stefanha@redhat.com | ||
17 | Message-Id: <20200120141858.587874-6-stefanha@redhat.com> | ||
18 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
19 | --- | 9 | --- |
20 | MAINTAINERS | 1 + | 10 | hw/s390x/vhost-user-fs-ccw.c | 75 ++++++++++++++++++++++++++++++++++++ |
21 | stubs/Makefile.objs | 1 + | 11 | hw/s390x/meson.build | 1 + |
22 | stubs/io_uring.c | 32 ++++++++++++++++++++++++++++++++ | 12 | 2 files changed, 76 insertions(+) |
23 | 3 files changed, 34 insertions(+) | 13 | create mode 100644 hw/s390x/vhost-user-fs-ccw.c |
24 | create mode 100644 stubs/io_uring.c | ||
25 | 14 | ||
26 | diff --git a/MAINTAINERS b/MAINTAINERS | 15 | diff --git a/hw/s390x/vhost-user-fs-ccw.c b/hw/s390x/vhost-user-fs-ccw.c |
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/MAINTAINERS | ||
29 | +++ b/MAINTAINERS | ||
30 | @@ -XXX,XX +XXX,XX @@ M: Stefan Hajnoczi <stefanha@redhat.com> | ||
31 | L: qemu-block@nongnu.org | ||
32 | S: Maintained | ||
33 | F: block/io_uring.c | ||
34 | +F: stubs/io_uring.c | ||
35 | |||
36 | qcow2 | ||
37 | M: Kevin Wolf <kwolf@redhat.com> | ||
38 | diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/stubs/Makefile.objs | ||
41 | +++ b/stubs/Makefile.objs | ||
42 | @@ -XXX,XX +XXX,XX @@ stub-obj-y += iothread.o | ||
43 | stub-obj-y += iothread-lock.o | ||
44 | stub-obj-y += is-daemonized.o | ||
45 | stub-obj-$(CONFIG_LINUX_AIO) += linux-aio.o | ||
46 | +stub-obj-$(CONFIG_LINUX_IO_URING) += io_uring.o | ||
47 | stub-obj-y += machine-init-done.o | ||
48 | stub-obj-y += migr-blocker.o | ||
49 | stub-obj-y += change-state-handler.o | ||
50 | diff --git a/stubs/io_uring.c b/stubs/io_uring.c | ||
51 | new file mode 100644 | 16 | new file mode 100644 |
52 | index XXXXXXX..XXXXXXX | 17 | index XXXXXXX..XXXXXXX |
53 | --- /dev/null | 18 | --- /dev/null |
54 | +++ b/stubs/io_uring.c | 19 | +++ b/hw/s390x/vhost-user-fs-ccw.c |
55 | @@ -XXX,XX +XXX,XX @@ | 20 | @@ -XXX,XX +XXX,XX @@ |
56 | +/* | 21 | +/* |
57 | + * Linux io_uring support. | 22 | + * virtio ccw vhost-user-fs implementation |
58 | + * | 23 | + * |
59 | + * Copyright (C) 2009 IBM, Corp. | 24 | + * Copyright 2020 IBM Corp. |
60 | + * Copyright (C) 2009 Red Hat, Inc. | ||
61 | + * | 25 | + * |
62 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | 26 | + * This work is licensed under the terms of the GNU GPL, version 2 or (at |
63 | + * See the COPYING file in the top-level directory. | 27 | + * your option) any later version. See the COPYING file in the top-level |
28 | + * directory. | ||
64 | + */ | 29 | + */ |
65 | +#include "qemu/osdep.h" | 30 | +#include "qemu/osdep.h" |
66 | +#include "block/aio.h" | 31 | +#include "hw/qdev-properties.h" |
67 | +#include "block/raw-aio.h" | 32 | +#include "qapi/error.h" |
33 | +#include "hw/virtio/vhost-user-fs.h" | ||
34 | +#include "virtio-ccw.h" | ||
68 | + | 35 | + |
69 | +void luring_detach_aio_context(LuringState *s, AioContext *old_context) | 36 | +typedef struct VHostUserFSCcw { |
37 | + VirtioCcwDevice parent_obj; | ||
38 | + VHostUserFS vdev; | ||
39 | +} VHostUserFSCcw; | ||
40 | + | ||
41 | +#define TYPE_VHOST_USER_FS_CCW "vhost-user-fs-ccw" | ||
42 | +#define VHOST_USER_FS_CCW(obj) \ | ||
43 | + OBJECT_CHECK(VHostUserFSCcw, (obj), TYPE_VHOST_USER_FS_CCW) | ||
44 | + | ||
45 | + | ||
46 | +static Property vhost_user_fs_ccw_properties[] = { | ||
47 | + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, | ||
48 | + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), | ||
49 | + DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev, | ||
50 | + VIRTIO_CCW_MAX_REV), | ||
51 | + DEFINE_PROP_END_OF_LIST(), | ||
52 | +}; | ||
53 | + | ||
54 | +static void vhost_user_fs_ccw_realize(VirtioCcwDevice *ccw_dev, Error **errp) | ||
70 | +{ | 55 | +{ |
71 | + abort(); | 56 | + VHostUserFSCcw *dev = VHOST_USER_FS_CCW(ccw_dev); |
57 | + DeviceState *vdev = DEVICE(&dev->vdev); | ||
58 | + | ||
59 | + qdev_realize(vdev, BUS(&ccw_dev->bus), errp); | ||
72 | +} | 60 | +} |
73 | + | 61 | + |
74 | +void luring_attach_aio_context(LuringState *s, AioContext *new_context) | 62 | +static void vhost_user_fs_ccw_instance_init(Object *obj) |
75 | +{ | 63 | +{ |
76 | + abort(); | 64 | + VHostUserFSCcw *dev = VHOST_USER_FS_CCW(obj); |
65 | + VirtioCcwDevice *ccw_dev = VIRTIO_CCW_DEVICE(obj); | ||
66 | + | ||
67 | + ccw_dev->force_revision_1 = true; | ||
68 | + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), | ||
69 | + TYPE_VHOST_USER_FS); | ||
77 | +} | 70 | +} |
78 | + | 71 | + |
79 | +LuringState *luring_init(Error **errp) | 72 | +static void vhost_user_fs_ccw_class_init(ObjectClass *klass, void *data) |
80 | +{ | 73 | +{ |
81 | + abort(); | 74 | + DeviceClass *dc = DEVICE_CLASS(klass); |
75 | + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); | ||
76 | + | ||
77 | + k->realize = vhost_user_fs_ccw_realize; | ||
78 | + device_class_set_props(dc, vhost_user_fs_ccw_properties); | ||
79 | + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); | ||
82 | +} | 80 | +} |
83 | + | 81 | + |
84 | +void luring_cleanup(LuringState *s) | 82 | +static const TypeInfo vhost_user_fs_ccw = { |
83 | + .name = TYPE_VHOST_USER_FS_CCW, | ||
84 | + .parent = TYPE_VIRTIO_CCW_DEVICE, | ||
85 | + .instance_size = sizeof(VHostUserFSCcw), | ||
86 | + .instance_init = vhost_user_fs_ccw_instance_init, | ||
87 | + .class_init = vhost_user_fs_ccw_class_init, | ||
88 | +}; | ||
89 | + | ||
90 | +static void vhost_user_fs_ccw_register(void) | ||
85 | +{ | 91 | +{ |
86 | + abort(); | 92 | + type_register_static(&vhost_user_fs_ccw); |
87 | +} | 93 | +} |
94 | + | ||
95 | +type_init(vhost_user_fs_ccw_register) | ||
96 | diff --git a/hw/s390x/meson.build b/hw/s390x/meson.build | ||
97 | index XXXXXXX..XXXXXXX 100644 | ||
98 | --- a/hw/s390x/meson.build | ||
99 | +++ b/hw/s390x/meson.build | ||
100 | @@ -XXX,XX +XXX,XX @@ virtio_ss.add(when: 'CONFIG_VIRTIO_SCSI', if_true: files('virtio-ccw-scsi.c')) | ||
101 | virtio_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-ccw-serial.c')) | ||
102 | virtio_ss.add(when: ['CONFIG_VIRTIO_9P', 'CONFIG_VIRTFS'], if_true: files('virtio-ccw-blk.c')) | ||
103 | virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-ccw.c')) | ||
104 | +virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-ccw.c')) | ||
105 | s390x_ss.add_all(when: 'CONFIG_VIRTIO_CCW', if_true: virtio_ss) | ||
106 | |||
107 | hw_arch += {'s390x': s390x_ss} | ||
88 | -- | 108 | -- |
89 | 2.24.1 | 109 | 2.26.2 |
90 | 110 | ||
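To make the linking point in the stubs/io_uring.c commit message above concrete, a hypothetical tool (not part of the series) that only needs util/async.o could look like the sketch below; with CONFIG_LINUX_IO_URING set, that object references the luring_*() symbols, and linking stubs/io_uring.o satisfies them without dragging in the block layer:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "block/aio.h"

    static void tick(void *opaque)
    {
        /* nothing to do; the point is only that this program links */
    }

    int main(void)
    {
        AioContext *ctx = aio_context_new(&error_abort);
        QEMUBH *bh = aio_bh_new(ctx, tick, NULL);

        qemu_bh_schedule(bh);
        aio_poll(ctx, true);
        qemu_bh_delete(bh);
        aio_context_unref(ctx);
        return 0;
    }
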
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | The iov_discard_front/back() operations are useful for parsing iovecs |
---|---|---|---|
2 | 2 | but they modify the array elements. If the original array is needed | |
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 3 | after parsing finishes there is currently no way to restore it. |
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | 4 | |
5 | Although g_memdup() can be used before performing destructive | ||
6 | iov_discard_front/back() operations, this is inefficient. | ||
7 | |||
8 | Introduce iov_discard_undo() to restore the array to the state prior to | ||
9 | an iov_discard_front/back() operation. | ||
10 | |||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | Message-id: 20200120141858.587874-7-stefanha@redhat.com | 12 | Reviewed-by: Li Qiang <liq3ea@gmail.com> |
7 | Message-Id: <20200120141858.587874-7-stefanha@redhat.com> | 13 | Message-Id: <20200917094455.822379-2-stefanha@redhat.com> |
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | 14 | --- |
10 | util/async.c | 36 ++++++++++++++++++++++++++++++++++++ | 15 | include/qemu/iov.h | 23 +++++++ |
11 | 1 file changed, 36 insertions(+) | 16 | tests/test-iov.c | 165 +++++++++++++++++++++++++++++++++++++++++++++ |
12 | 17 | util/iov.c | 50 ++++++++++++-- | |
13 | diff --git a/util/async.c b/util/async.c | 18 | 3 files changed, 234 insertions(+), 4 deletions(-) |
19 | |||
20 | diff --git a/include/qemu/iov.h b/include/qemu/iov.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/util/async.c | 22 | --- a/include/qemu/iov.h |
16 | +++ b/util/async.c | 23 | +++ b/include/qemu/iov.h |
17 | @@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source) | 24 | @@ -XXX,XX +XXX,XX @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt, |
25 | size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, | ||
26 | size_t bytes); | ||
27 | |||
28 | +/* Information needed to undo an iov_discard_*() operation */ | ||
29 | +typedef struct { | ||
30 | + struct iovec *modified_iov; | ||
31 | + struct iovec orig; | ||
32 | +} IOVDiscardUndo; | ||
33 | + | ||
34 | +/* | ||
35 | + * Undo an iov_discard_front_undoable() or iov_discard_back_undoable() | ||
36 | + * operation. If multiple operations are made then each one needs a separate | ||
37 | + * IOVDiscardUndo and iov_discard_undo() must be called in the reverse order | ||
38 | + * that the operations were made. | ||
39 | + */ | ||
40 | +void iov_discard_undo(IOVDiscardUndo *undo); | ||
41 | + | ||
42 | +/* | ||
43 | + * Undoable versions of iov_discard_front() and iov_discard_back(). Use | ||
44 | + * iov_discard_undo() to reset to the state before the discard operations. | ||
45 | + */ | ||
46 | +size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt, | ||
47 | + size_t bytes, IOVDiscardUndo *undo); | ||
48 | +size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt, | ||
49 | + size_t bytes, IOVDiscardUndo *undo); | ||
50 | + | ||
51 | typedef struct QEMUIOVector { | ||
52 | struct iovec *iov; | ||
53 | int niov; | ||
54 | diff --git a/tests/test-iov.c b/tests/test-iov.c | ||
55 | index XXXXXXX..XXXXXXX 100644 | ||
56 | --- a/tests/test-iov.c | ||
57 | +++ b/tests/test-iov.c | ||
58 | @@ -XXX,XX +XXX,XX @@ static void iov_free(struct iovec *iov, unsigned niov) | ||
59 | g_free(iov); | ||
60 | } | ||
61 | |||
62 | +static bool iov_equals(const struct iovec *a, const struct iovec *b, | ||
63 | + unsigned niov) | ||
64 | +{ | ||
65 | + return memcmp(a, b, sizeof(a[0]) * niov) == 0; | ||
66 | +} | ||
67 | + | ||
68 | static void test_iov_bytes(struct iovec *iov, unsigned niov, | ||
69 | size_t offset, size_t bytes) | ||
70 | { | ||
71 | @@ -XXX,XX +XXX,XX @@ static void test_discard_front(void) | ||
72 | iov_free(iov, iov_cnt); | ||
73 | } | ||
74 | |||
75 | +static void test_discard_front_undo(void) | ||
76 | +{ | ||
77 | + IOVDiscardUndo undo; | ||
78 | + struct iovec *iov; | ||
79 | + struct iovec *iov_tmp; | ||
80 | + struct iovec *iov_orig; | ||
81 | + unsigned int iov_cnt; | ||
82 | + unsigned int iov_cnt_tmp; | ||
83 | + size_t size; | ||
84 | + | ||
85 | + /* Discard zero bytes */ | ||
86 | + iov_random(&iov, &iov_cnt); | ||
87 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
88 | + iov_tmp = iov; | ||
89 | + iov_cnt_tmp = iov_cnt; | ||
90 | + iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, 0, &undo); | ||
91 | + iov_discard_undo(&undo); | ||
92 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
93 | + g_free(iov_orig); | ||
94 | + iov_free(iov, iov_cnt); | ||
95 | + | ||
96 | + /* Discard more bytes than vector size */ | ||
97 | + iov_random(&iov, &iov_cnt); | ||
98 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
99 | + iov_tmp = iov; | ||
100 | + iov_cnt_tmp = iov_cnt; | ||
101 | + size = iov_size(iov, iov_cnt); | ||
102 | + iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size + 1, &undo); | ||
103 | + iov_discard_undo(&undo); | ||
104 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
105 | + g_free(iov_orig); | ||
106 | + iov_free(iov, iov_cnt); | ||
107 | + | ||
108 | + /* Discard entire vector */ | ||
109 | + iov_random(&iov, &iov_cnt); | ||
110 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
111 | + iov_tmp = iov; | ||
112 | + iov_cnt_tmp = iov_cnt; | ||
113 | + size = iov_size(iov, iov_cnt); | ||
114 | + iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size, &undo); | ||
115 | + iov_discard_undo(&undo); | ||
116 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
117 | + g_free(iov_orig); | ||
118 | + iov_free(iov, iov_cnt); | ||
119 | + | ||
120 | + /* Discard within first element */ | ||
121 | + iov_random(&iov, &iov_cnt); | ||
122 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
123 | + iov_tmp = iov; | ||
124 | + iov_cnt_tmp = iov_cnt; | ||
125 | + size = g_test_rand_int_range(1, iov->iov_len); | ||
126 | + iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size, &undo); | ||
127 | + iov_discard_undo(&undo); | ||
128 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
129 | + g_free(iov_orig); | ||
130 | + iov_free(iov, iov_cnt); | ||
131 | + | ||
132 | + /* Discard entire first element */ | ||
133 | + iov_random(&iov, &iov_cnt); | ||
134 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
135 | + iov_tmp = iov; | ||
136 | + iov_cnt_tmp = iov_cnt; | ||
137 | + iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, iov->iov_len, &undo); | ||
138 | + iov_discard_undo(&undo); | ||
139 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
140 | + g_free(iov_orig); | ||
141 | + iov_free(iov, iov_cnt); | ||
142 | + | ||
143 | + /* Discard within second element */ | ||
144 | + iov_random(&iov, &iov_cnt); | ||
145 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
146 | + iov_tmp = iov; | ||
147 | + iov_cnt_tmp = iov_cnt; | ||
148 | + size = iov->iov_len + g_test_rand_int_range(1, iov[1].iov_len); | ||
149 | + iov_discard_front_undoable(&iov_tmp, &iov_cnt_tmp, size, &undo); | ||
150 | + iov_discard_undo(&undo); | ||
151 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
152 | + g_free(iov_orig); | ||
153 | + iov_free(iov, iov_cnt); | ||
154 | +} | ||
155 | + | ||
156 | static void test_discard_back(void) | ||
157 | { | ||
158 | struct iovec *iov; | ||
159 | @@ -XXX,XX +XXX,XX @@ static void test_discard_back(void) | ||
160 | iov_free(iov, iov_cnt); | ||
161 | } | ||
162 | |||
163 | +static void test_discard_back_undo(void) | ||
164 | +{ | ||
165 | + IOVDiscardUndo undo; | ||
166 | + struct iovec *iov; | ||
167 | + struct iovec *iov_orig; | ||
168 | + unsigned int iov_cnt; | ||
169 | + unsigned int iov_cnt_tmp; | ||
170 | + size_t size; | ||
171 | + | ||
172 | + /* Discard zero bytes */ | ||
173 | + iov_random(&iov, &iov_cnt); | ||
174 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
175 | + iov_cnt_tmp = iov_cnt; | ||
176 | + iov_discard_back_undoable(iov, &iov_cnt_tmp, 0, &undo); | ||
177 | + iov_discard_undo(&undo); | ||
178 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
179 | + g_free(iov_orig); | ||
180 | + iov_free(iov, iov_cnt); | ||
181 | + | ||
182 | + /* Discard more bytes than vector size */ | ||
183 | + iov_random(&iov, &iov_cnt); | ||
184 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
185 | + iov_cnt_tmp = iov_cnt; | ||
186 | + size = iov_size(iov, iov_cnt); | ||
187 | + iov_discard_back_undoable(iov, &iov_cnt_tmp, size + 1, &undo); | ||
188 | + iov_discard_undo(&undo); | ||
189 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
190 | + g_free(iov_orig); | ||
191 | + iov_free(iov, iov_cnt); | ||
192 | + | ||
193 | + /* Discard entire vector */ | ||
194 | + iov_random(&iov, &iov_cnt); | ||
195 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
196 | + iov_cnt_tmp = iov_cnt; | ||
197 | + size = iov_size(iov, iov_cnt); | ||
198 | + iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); | ||
199 | + iov_discard_undo(&undo); | ||
200 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
201 | + g_free(iov_orig); | ||
202 | + iov_free(iov, iov_cnt); | ||
203 | + | ||
204 | + /* Discard within last element */ | ||
205 | + iov_random(&iov, &iov_cnt); | ||
206 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
207 | + iov_cnt_tmp = iov_cnt; | ||
208 | + size = g_test_rand_int_range(1, iov[iov_cnt - 1].iov_len); | ||
209 | + iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); | ||
210 | + iov_discard_undo(&undo); | ||
211 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
212 | + g_free(iov_orig); | ||
213 | + iov_free(iov, iov_cnt); | ||
214 | + | ||
215 | + /* Discard entire last element */ | ||
216 | + iov_random(&iov, &iov_cnt); | ||
217 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
218 | + iov_cnt_tmp = iov_cnt; | ||
219 | + size = iov[iov_cnt - 1].iov_len; | ||
220 | + iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); | ||
221 | + iov_discard_undo(&undo); | ||
222 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
223 | + g_free(iov_orig); | ||
224 | + iov_free(iov, iov_cnt); | ||
225 | + | ||
226 | + /* Discard within second-to-last element */ | ||
227 | + iov_random(&iov, &iov_cnt); | ||
228 | + iov_orig = g_memdup(iov, sizeof(iov[0]) * iov_cnt); | ||
229 | + iov_cnt_tmp = iov_cnt; | ||
230 | + size = iov[iov_cnt - 1].iov_len + | ||
231 | + g_test_rand_int_range(1, iov[iov_cnt - 2].iov_len); | ||
232 | + iov_discard_back_undoable(iov, &iov_cnt_tmp, size, &undo); | ||
233 | + iov_discard_undo(&undo); | ||
234 | + assert(iov_equals(iov, iov_orig, iov_cnt)); | ||
235 | + g_free(iov_orig); | ||
236 | + iov_free(iov, iov_cnt); | ||
237 | +} | ||
238 | + | ||
239 | int main(int argc, char **argv) | ||
240 | { | ||
241 | g_test_init(&argc, &argv, NULL); | ||
242 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
243 | g_test_add_func("/basic/iov/io", test_io); | ||
244 | g_test_add_func("/basic/iov/discard-front", test_discard_front); | ||
245 | g_test_add_func("/basic/iov/discard-back", test_discard_back); | ||
246 | + g_test_add_func("/basic/iov/discard-front-undo", test_discard_front_undo); | ||
247 | + g_test_add_func("/basic/iov/discard-back-undo", test_discard_back_undo); | ||
248 | return g_test_run(); | ||
249 | } | ||
250 | diff --git a/util/iov.c b/util/iov.c | ||
251 | index XXXXXXX..XXXXXXX 100644 | ||
252 | --- a/util/iov.c | ||
253 | +++ b/util/iov.c | ||
254 | @@ -XXX,XX +XXX,XX @@ void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf) | ||
18 | } | 255 | } |
19 | #endif | 256 | } |
20 | 257 | ||
21 | +#ifdef CONFIG_LINUX_IO_URING | 258 | -size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt, |
22 | + if (ctx->linux_io_uring) { | 259 | - size_t bytes) |
23 | + luring_detach_aio_context(ctx->linux_io_uring, ctx); | 260 | +void iov_discard_undo(IOVDiscardUndo *undo) |
24 | + luring_cleanup(ctx->linux_io_uring); | 261 | +{ |
25 | + ctx->linux_io_uring = NULL; | 262 | + /* Restore original iovec if it was modified */ |
263 | + if (undo->modified_iov) { | ||
264 | + *undo->modified_iov = undo->orig; | ||
26 | + } | 265 | + } |
27 | +#endif | 266 | +} |
28 | + | 267 | + |
29 | assert(QSLIST_EMPTY(&ctx->scheduled_coroutines)); | 268 | +size_t iov_discard_front_undoable(struct iovec **iov, |
30 | qemu_bh_delete(ctx->co_schedule_bh); | 269 | + unsigned int *iov_cnt, |
31 | 270 | + size_t bytes, | |
32 | @@ -XXX,XX +XXX,XX @@ LinuxAioState *aio_get_linux_aio(AioContext *ctx) | 271 | + IOVDiscardUndo *undo) |
33 | } | 272 | { |
34 | #endif | 273 | size_t total = 0; |
35 | 274 | struct iovec *cur; | |
36 | +#ifdef CONFIG_LINUX_IO_URING | 275 | |
37 | +LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp) | 276 | + if (undo) { |
38 | +{ | 277 | + undo->modified_iov = NULL; |
39 | + if (ctx->linux_io_uring) { | ||
40 | + return ctx->linux_io_uring; | ||
41 | + } | 278 | + } |
42 | + | 279 | + |
43 | + ctx->linux_io_uring = luring_init(errp); | 280 | for (cur = *iov; *iov_cnt > 0; cur++) { |
44 | + if (!ctx->linux_io_uring) { | 281 | if (cur->iov_len > bytes) { |
45 | + return NULL; | 282 | + if (undo) { |
283 | + undo->modified_iov = cur; | ||
284 | + undo->orig = *cur; | ||
285 | + } | ||
286 | + | ||
287 | cur->iov_base += bytes; | ||
288 | cur->iov_len -= bytes; | ||
289 | total += bytes; | ||
290 | @@ -XXX,XX +XXX,XX @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt, | ||
291 | return total; | ||
292 | } | ||
293 | |||
294 | -size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, | ||
295 | - size_t bytes) | ||
296 | +size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt, | ||
297 | + size_t bytes) | ||
298 | +{ | ||
299 | + return iov_discard_front_undoable(iov, iov_cnt, bytes, NULL); | ||
300 | +} | ||
301 | + | ||
302 | +size_t iov_discard_back_undoable(struct iovec *iov, | ||
303 | + unsigned int *iov_cnt, | ||
304 | + size_t bytes, | ||
305 | + IOVDiscardUndo *undo) | ||
306 | { | ||
307 | size_t total = 0; | ||
308 | struct iovec *cur; | ||
309 | |||
310 | + if (undo) { | ||
311 | + undo->modified_iov = NULL; | ||
46 | + } | 312 | + } |
47 | + | 313 | + |
48 | + luring_attach_aio_context(ctx->linux_io_uring, ctx); | 314 | if (*iov_cnt == 0) { |
49 | + return ctx->linux_io_uring; | 315 | return 0; |
50 | +} | 316 | } |
51 | + | 317 | @@ -XXX,XX +XXX,XX @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, |
52 | +LuringState *aio_get_linux_io_uring(AioContext *ctx) | 318 | |
53 | +{ | 319 | while (*iov_cnt > 0) { |
54 | + assert(ctx->linux_io_uring); | 320 | if (cur->iov_len > bytes) { |
55 | + return ctx->linux_io_uring; | 321 | + if (undo) { |
56 | +} | 322 | + undo->modified_iov = cur; |
57 | +#endif | 323 | + undo->orig = *cur; |
58 | + | 324 | + } |
59 | void aio_notify(AioContext *ctx) | 325 | + |
60 | { | 326 | cur->iov_len -= bytes; |
61 | /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs | 327 | total += bytes; |
62 | @@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp) | 328 | break; |
63 | #ifdef CONFIG_LINUX_AIO | 329 | @@ -XXX,XX +XXX,XX @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, |
64 | ctx->linux_aio = NULL; | 330 | return total; |
65 | #endif | 331 | } |
66 | + | 332 | |
67 | +#ifdef CONFIG_LINUX_IO_URING | 333 | +size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt, |
68 | + ctx->linux_io_uring = NULL; | 334 | + size_t bytes) |
69 | +#endif | 335 | +{ |
70 | + | 336 | + return iov_discard_back_undoable(iov, iov_cnt, bytes, NULL); |
71 | ctx->thread_pool = NULL; | 337 | +} |
72 | qemu_rec_mutex_init(&ctx->lock); | 338 | + |
73 | timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx); | 339 | void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes) |
340 | { | ||
341 | size_t total; | ||
74 | -- | 342 | -- |
75 | 2.24.1 | 343 | 2.26.2 |
76 | 344 | ||
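A short usage sketch of the new undo API (illustrative only; process_packet and header_len are made-up names): discard a header, consume the payload, then restore the vector before handing it back, e.g. to virtqueue_push().

    #include "qemu/osdep.h"
    #include "qemu/iov.h"

    static void process_packet(struct iovec *iov, unsigned int iov_cnt,
                               size_t header_len)
    {
        IOVDiscardUndo undo;
        struct iovec *payload = iov;
        unsigned int payload_cnt = iov_cnt;

        /* Destructively strip the header; remember how to undo it. */
        iov_discard_front_undoable(&payload, &payload_cnt, header_len, &undo);

        /* ... use payload/payload_cnt here ... */

        /* Put iov[] back exactly as the caller built it. */
        iov_discard_undo(&undo);
    }
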
1 | From: Paolo Bonzini <pbonzini@redhat.com> | 1 | Fuzzing discovered that virtqueue_unmap_sg() is being called on modified |
---|---|---|---|
2 | req->in/out_sg iovecs. This means dma_memory_map() and | ||
3 | dma_memory_unmap() calls do not have matching memory addresses. | ||
2 | 4 | ||
3 | Marking without waiting would not result in actual serialising behavior. | 5 | Fuzzing discovered that non-RAM addresses trigger a bug: |
4 | Thus, make a call bdrv_mark_request_serialising sufficient for | ||
5 | serialisation to happen. | ||
6 | 6 | ||
7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 7 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
8 | Message-id: 1578495356-46219-3-git-send-email-pbonzini@redhat.com | 8 | bool is_write, hwaddr access_len) |
9 | Message-Id: <1578495356-46219-3-git-send-email-pbonzini@redhat.com> | 9 | { |
10 | if (buffer != bounce.buffer) { | ||
11 | ^^^^^^^^^^^^^^^^^^^^^^^ | ||
12 | |||
13 | A modified iov->iov_base is no longer recognized as a bounce buffer and | ||
14 | the wrong branch is taken. | ||
15 | |||
16 | There are more potential bugs: dirty memory is not tracked correctly and | ||
17 | MemoryRegion refcounts can be leaked. | ||
18 | |||
19 | Use the new iov_discard_undo() API to restore elem->in/out_sg before | ||
20 | virtqueue_push() is called. | ||
21 | |||
22 | Fixes: 827805a2492c1bbf1c0712ed18ee069b4ebf3dd6 ("virtio-blk: Convert VirtIOBlockReq.out to structrue") | ||
23 | Reported-by: Alexander Bulekov <alxndr@bu.edu> | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 24 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
25 | Reviewed-by: Li Qiang <liq3ea@gmail.com> | ||
26 | Buglink: https://bugs.launchpad.net/qemu/+bug/1890360 | ||
27 | Message-Id: <20200917094455.822379-3-stefanha@redhat.com> | ||
11 | --- | 28 | --- |
12 | block/file-posix.c | 1 - | 29 | include/hw/virtio/virtio-blk.h | 2 ++ |
13 | block/io.c | 40 +++++++++++++++++---------------------- | 30 | hw/block/virtio-blk.c | 11 +++++++++-- |
14 | include/block/block_int.h | 3 +-- | 31 | 2 files changed, 11 insertions(+), 2 deletions(-) |
15 | 3 files changed, 18 insertions(+), 26 deletions(-) | ||
16 | 32 | ||
17 | diff --git a/block/file-posix.c b/block/file-posix.c | 33 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h |
18 | index XXXXXXX..XXXXXXX 100644 | 34 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/block/file-posix.c | 35 | --- a/include/hw/virtio/virtio-blk.h |
20 | +++ b/block/file-posix.c | 36 | +++ b/include/hw/virtio/virtio-blk.h |
21 | @@ -XXX,XX +XXX,XX @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, | 37 | @@ -XXX,XX +XXX,XX @@ typedef struct VirtIOBlockReq { |
22 | req->overlap_bytes = req->bytes; | 38 | int64_t sector_num; |
23 | 39 | VirtIOBlock *dev; | |
24 | bdrv_mark_request_serialising(req, bs->bl.request_alignment); | 40 | VirtQueue *vq; |
25 | - bdrv_wait_serialising_requests(req); | 41 | + IOVDiscardUndo inhdr_undo; |
42 | + IOVDiscardUndo outhdr_undo; | ||
43 | struct virtio_blk_inhdr *in; | ||
44 | struct virtio_blk_outhdr out; | ||
45 | QEMUIOVector qiov; | ||
46 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | ||
47 | index XXXXXXX..XXXXXXX 100644 | ||
48 | --- a/hw/block/virtio-blk.c | ||
49 | +++ b/hw/block/virtio-blk.c | ||
50 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) | ||
51 | trace_virtio_blk_req_complete(vdev, req, status); | ||
52 | |||
53 | stb_p(&req->in->status, status); | ||
54 | + iov_discard_undo(&req->inhdr_undo); | ||
55 | + iov_discard_undo(&req->outhdr_undo); | ||
56 | virtqueue_push(req->vq, &req->elem, req->in_len); | ||
57 | if (s->dataplane_started && !s->dataplane_disabled) { | ||
58 | virtio_blk_data_plane_notify(s->dataplane, req->vq); | ||
59 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | ||
60 | return -1; | ||
26 | } | 61 | } |
27 | #endif | 62 | |
28 | 63 | - iov_discard_front(&out_iov, &out_num, sizeof(req->out)); | |
29 | diff --git a/block/io.c b/block/io.c | 64 | + iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out), |
30 | index XXXXXXX..XXXXXXX 100644 | 65 | + &req->outhdr_undo); |
31 | --- a/block/io.c | 66 | |
32 | +++ b/block/io.c | 67 | if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { |
33 | @@ -XXX,XX +XXX,XX @@ | 68 | virtio_error(vdev, "virtio-blk request inhdr too short"); |
34 | #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS) | 69 | + iov_discard_undo(&req->outhdr_undo); |
35 | 70 | return -1; | |
36 | static void bdrv_parent_cb_resize(BlockDriverState *bs); | ||
37 | +static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); | ||
38 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, | ||
39 | int64_t offset, int bytes, BdrvRequestFlags flags); | ||
40 | |||
41 | @@ -XXX,XX +XXX,XX @@ static void tracked_request_begin(BdrvTrackedRequest *req, | ||
42 | qemu_co_mutex_unlock(&bs->reqs_lock); | ||
43 | } | ||
44 | |||
45 | -void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
46 | +bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
47 | { | ||
48 | int64_t overlap_offset = req->offset & ~(align - 1); | ||
49 | uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) | ||
50 | @@ -XXX,XX +XXX,XX @@ void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
51 | |||
52 | req->overlap_offset = MIN(req->overlap_offset, overlap_offset); | ||
53 | req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); | ||
54 | -} | ||
55 | - | ||
56 | -static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req) | ||
57 | -{ | ||
58 | - /* | ||
59 | - * If the request is serialising, overlap_offset and overlap_bytes are set, | ||
60 | - * so we can check if the request is aligned. Otherwise, don't care and | ||
61 | - * return false. | ||
62 | - */ | ||
63 | - | ||
64 | - return req->serialising && (req->offset == req->overlap_offset) && | ||
65 | - (req->bytes == req->overlap_bytes); | ||
66 | + return bdrv_wait_serialising_requests(req); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | @@ -XXX,XX +XXX,XX @@ void bdrv_dec_in_flight(BlockDriverState *bs) | ||
71 | bdrv_wakeup(bs); | ||
72 | } | ||
73 | |||
74 | -bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) | ||
75 | +static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self) | ||
76 | { | ||
77 | BlockDriverState *bs = self->bs; | ||
78 | BdrvTrackedRequest *req; | ||
79 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, | ||
80 | * it ensures that the CoR read and write operations are atomic and | ||
81 | * guest writes cannot interleave between them. */ | ||
82 | bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | ||
83 | + } else { | ||
84 | + bdrv_wait_serialising_requests(req); | ||
85 | } | 71 | } |
86 | 72 | ||
87 | - bdrv_wait_serialising_requests(req); | 73 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
88 | - | 74 | req->in = (void *)in_iov[in_num - 1].iov_base |
89 | if (flags & BDRV_REQ_COPY_ON_READ) { | 75 | + in_iov[in_num - 1].iov_len |
90 | int64_t pnum; | 76 | - sizeof(struct virtio_blk_inhdr); |
91 | 77 | - iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr)); | |
92 | @@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, | 78 | + iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr), |
93 | assert(!(flags & ~BDRV_REQ_MASK)); | 79 | + &req->inhdr_undo); |
94 | 80 | ||
95 | if (flags & BDRV_REQ_SERIALISING) { | 81 | type = virtio_ldl_p(vdev, &req->out.type); |
96 | - bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | 82 | |
97 | + waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | 83 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
98 | + /* | 84 | |
99 | + * For a misaligned request we should have already waited earlier, | 85 | if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr, |
100 | + * because we come after bdrv_padding_rmw_read which must be called | 86 | sizeof(dwz_hdr)) != sizeof(dwz_hdr))) { |
101 | + * with the request already marked as serialising. | 87 | + iov_discard_undo(&req->inhdr_undo); |
102 | + */ | 88 | + iov_discard_undo(&req->outhdr_undo); |
103 | + assert(!waited || | 89 | virtio_error(vdev, "virtio-blk discard/write_zeroes header" |
104 | + (req->offset == req->overlap_offset && | 90 | " too short"); |
105 | + req->bytes == req->overlap_bytes)); | 91 | return -1; |
106 | + } else { | ||
107 | + bdrv_wait_serialising_requests(req); | ||
108 | } | ||
109 | |||
110 | - waited = bdrv_wait_serialising_requests(req); | ||
111 | - | ||
112 | - assert(!waited || !req->serialising || | ||
113 | - is_request_serialising_and_aligned(req)); | ||
114 | assert(req->overlap_offset <= offset); | ||
115 | assert(offset + bytes <= req->overlap_offset + req->overlap_bytes); | ||
116 | assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE); | ||
117 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child, | ||
118 | padding = bdrv_init_padding(bs, offset, bytes, &pad); | ||
119 | if (padding) { | ||
120 | bdrv_mark_request_serialising(req, align); | ||
121 | - bdrv_wait_serialising_requests(req); | ||
122 | |||
123 | bdrv_padding_rmw_read(child, req, &pad, true); | ||
124 | |||
125 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, | ||
126 | |||
127 | if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) { | ||
128 | bdrv_mark_request_serialising(&req, align); | ||
129 | - bdrv_wait_serialising_requests(&req); | ||
130 | bdrv_padding_rmw_read(child, &req, &pad, false); | ||
131 | } | ||
132 | |||
133 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
134 | index XXXXXXX..XXXXXXX 100644 | ||
135 | --- a/include/block/block_int.h | ||
136 | +++ b/include/block/block_int.h | ||
137 | @@ -XXX,XX +XXX,XX @@ extern unsigned int bdrv_drain_all_count; | ||
138 | void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent); | ||
139 | void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent); | ||
140 | |||
141 | -bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self); | ||
142 | -void bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align); | ||
143 | +bool coroutine_fn bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align); | ||
144 | BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs); | ||
145 | |||
146 | int get_tmp_filename(char *filename, int size); | ||
147 | -- | 92 | -- |
148 | 2.24.1 | 93 | 2.26.2 |
149 | 94 | ||
150 | diff view generated by jsdifflib |
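In virtio-blk the pattern is: trim the request/response headers with the undoable variants when the request is parsed, then undo both records on completion and on every early-error return. A condensed sketch of that flow (the calls are the ones added in the diff; the surrounding parsing and error handling is abbreviated):

    /* parse: remember how the header iovecs were trimmed */
    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    /* complete: restore elem->in/out_sg before pushing the element so that
     * dma_memory_unmap() sees the addresses that were originally mapped */
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);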
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | A number of iov_discard_front/back() operations are made by |
---|---|---|---|
2 | virtio-crypto. The elem->in/out_sg iovec arrays are modified by these | ||
3 | operations, resulting in virtqueue_unmap_sg() calls on different addresses | ||
4 | than were originally mapped. | ||
2 | 5 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 6 | This is problematic because dirty memory may not be logged correctly, |
4 | Reviewed-by: Maxim Levitsky <maximlevitsky@gmail.com> | 7 | MemoryRegion refcounts may be leaked, and the non-RAM bounce buffer can |
5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | 8 | be leaked. |
9 | |||
10 | Take a copy of the elem->in/out_sg arrays so that the originals are | ||
11 | preserved. The iov_discard_undo() API could be used instead (with better | ||
12 | performance) but requires careful auditing of the code, so do the simple | ||
13 | thing instead. | ||
14 | |||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Message-id: 20200120141858.587874-9-stefanha@redhat.com | 16 | Reviewed-by: Li Qiang <liq3ea@gmail.com> |
8 | Message-Id: <20200120141858.587874-9-stefanha@redhat.com> | 17 | Message-Id: <20200917094455.822379-4-stefanha@redhat.com> |
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | 18 | --- |
11 | block/file-posix.c | 98 +++++++++++++++++++++++++++++++++++++--------- | 19 | hw/virtio/virtio-crypto.c | 17 ++++++++++++++--- |
12 | 1 file changed, 79 insertions(+), 19 deletions(-) | 20 | 1 file changed, 14 insertions(+), 3 deletions(-) |
13 | 21 | ||
14 | diff --git a/block/file-posix.c b/block/file-posix.c | 22 | diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c |
15 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block/file-posix.c | 24 | --- a/hw/virtio/virtio-crypto.c |
17 | +++ b/block/file-posix.c | 25 | +++ b/hw/virtio/virtio-crypto.c |
18 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState { | 26 | @@ -XXX,XX +XXX,XX @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) |
19 | bool has_write_zeroes:1; | 27 | size_t s; |
20 | bool discard_zeroes:1; | 28 | |
21 | bool use_linux_aio:1; | 29 | for (;;) { |
22 | + bool use_linux_io_uring:1; | 30 | + g_autofree struct iovec *out_iov_copy = NULL; |
23 | bool page_cache_inconsistent:1; | 31 | + |
24 | bool has_fallocate; | 32 | elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); |
25 | bool needs_alignment; | 33 | if (!elem) { |
26 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList raw_runtime_opts = { | 34 | break; |
27 | { | 35 | @@ -XXX,XX +XXX,XX @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) |
28 | .name = "aio", | 36 | } |
29 | .type = QEMU_OPT_STRING, | 37 | |
30 | - .help = "host AIO implementation (threads, native)", | 38 | out_num = elem->out_num; |
31 | + .help = "host AIO implementation (threads, native, io_uring)", | 39 | - out_iov = elem->out_sg; |
32 | }, | 40 | + out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num); |
33 | { | 41 | + out_iov = out_iov_copy; |
34 | .name = "locking", | 42 | + |
35 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | 43 | in_num = elem->in_num; |
36 | goto fail; | 44 | in_iov = elem->in_sg; |
45 | + | ||
46 | if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl)) | ||
47 | != sizeof(ctrl))) { | ||
48 | virtio_error(vdev, "virtio-crypto request ctrl_hdr too short"); | ||
49 | @@ -XXX,XX +XXX,XX @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) | ||
50 | int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq)); | ||
51 | struct virtio_crypto_op_data_req req; | ||
52 | int ret; | ||
53 | + g_autofree struct iovec *in_iov_copy = NULL; | ||
54 | + g_autofree struct iovec *out_iov_copy = NULL; | ||
55 | struct iovec *in_iov; | ||
56 | struct iovec *out_iov; | ||
57 | unsigned in_num; | ||
58 | @@ -XXX,XX +XXX,XX @@ virtio_crypto_handle_request(VirtIOCryptoReq *request) | ||
37 | } | 59 | } |
38 | 60 | ||
39 | - aio_default = (bdrv_flags & BDRV_O_NATIVE_AIO) | 61 | out_num = elem->out_num; |
40 | - ? BLOCKDEV_AIO_OPTIONS_NATIVE | 62 | - out_iov = elem->out_sg; |
41 | - : BLOCKDEV_AIO_OPTIONS_THREADS; | 63 | + out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num); |
42 | + if (bdrv_flags & BDRV_O_NATIVE_AIO) { | 64 | + out_iov = out_iov_copy; |
43 | + aio_default = BLOCKDEV_AIO_OPTIONS_NATIVE; | ||
44 | +#ifdef CONFIG_LINUX_IO_URING | ||
45 | + } else if (bdrv_flags & BDRV_O_IO_URING) { | ||
46 | + aio_default = BLOCKDEV_AIO_OPTIONS_IO_URING; | ||
47 | +#endif | ||
48 | + } else { | ||
49 | + aio_default = BLOCKDEV_AIO_OPTIONS_THREADS; | ||
50 | + } | ||
51 | + | 65 | + |
52 | aio = qapi_enum_parse(&BlockdevAioOptions_lookup, | 66 | in_num = elem->in_num; |
53 | qemu_opt_get(opts, "aio"), | 67 | - in_iov = elem->in_sg; |
54 | aio_default, &local_err); | 68 | + in_iov_copy = g_memdup(elem->in_sg, sizeof(in_iov[0]) * in_num); |
55 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | 69 | + in_iov = in_iov_copy; |
56 | ret = -EINVAL; | ||
57 | goto fail; | ||
58 | } | ||
59 | + | 70 | + |
60 | s->use_linux_aio = (aio == BLOCKDEV_AIO_OPTIONS_NATIVE); | 71 | if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req)) |
61 | +#ifdef CONFIG_LINUX_IO_URING | 72 | != sizeof(req))) { |
62 | + s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING); | 73 | virtio_error(vdev, "virtio-crypto request outhdr too short"); |
63 | +#endif | ||
64 | |||
65 | locking = qapi_enum_parse(&OnOffAuto_lookup, | ||
66 | qemu_opt_get(opts, "locking"), | ||
67 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | ||
68 | } | ||
69 | #endif /* !defined(CONFIG_LINUX_AIO) */ | ||
70 | |||
71 | +#ifdef CONFIG_LINUX_IO_URING | ||
72 | + if (s->use_linux_io_uring) { | ||
73 | + if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) { | ||
74 | + error_prepend(errp, "Unable to use io_uring: "); | ||
75 | + goto fail; | ||
76 | + } | ||
77 | + } | ||
78 | +#else | ||
79 | + if (s->use_linux_io_uring) { | ||
80 | + error_setg(errp, "aio=io_uring was specified, but is not supported " | ||
81 | + "in this build."); | ||
82 | + ret = -EINVAL; | ||
83 | + goto fail; | ||
84 | + } | ||
85 | +#endif /* !defined(CONFIG_LINUX_IO_URING) */ | ||
86 | + | ||
87 | s->has_discard = true; | ||
88 | s->has_write_zeroes = true; | ||
89 | if ((bs->open_flags & BDRV_O_NOCACHE) != 0) { | ||
90 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, | ||
91 | return -EIO; | ||
92 | |||
93 | /* | ||
94 | - * Check if the underlying device requires requests to be aligned, | ||
95 | - * and if the request we are trying to submit is aligned or not. | ||
96 | - * If this is the case tell the low-level driver that it needs | ||
97 | - * to copy the buffer. | ||
98 | + * When using O_DIRECT, the request must be aligned to be able to use | ||
99 | + * either libaio or io_uring interface. If not fail back to regular thread | ||
100 | + * pool read/write code which emulates this for us if we | ||
101 | + * set QEMU_AIO_MISALIGNED. | ||
102 | */ | ||
103 | - if (s->needs_alignment) { | ||
104 | - if (!bdrv_qiov_is_aligned(bs, qiov)) { | ||
105 | - type |= QEMU_AIO_MISALIGNED; | ||
106 | + if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) { | ||
107 | + type |= QEMU_AIO_MISALIGNED; | ||
108 | +#ifdef CONFIG_LINUX_IO_URING | ||
109 | + } else if (s->use_linux_io_uring) { | ||
110 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
111 | + assert(qiov->size == bytes); | ||
112 | + return luring_co_submit(bs, aio, s->fd, offset, qiov, type); | ||
113 | +#endif | ||
114 | #ifdef CONFIG_LINUX_AIO | ||
115 | - } else if (s->use_linux_aio) { | ||
116 | - LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
117 | - assert(qiov->size == bytes); | ||
118 | - return laio_co_submit(bs, aio, s->fd, offset, qiov, type); | ||
119 | + } else if (s->use_linux_aio) { | ||
120 | + LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
121 | + assert(qiov->size == bytes); | ||
122 | + return laio_co_submit(bs, aio, s->fd, offset, qiov, type); | ||
123 | #endif | ||
124 | - } | ||
125 | } | ||
126 | |||
127 | acb = (RawPosixAIOData) { | ||
128 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
129 | |||
130 | static void raw_aio_plug(BlockDriverState *bs) | ||
131 | { | ||
132 | + BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
133 | #ifdef CONFIG_LINUX_AIO | ||
134 | - BDRVRawState *s = bs->opaque; | ||
135 | if (s->use_linux_aio) { | ||
136 | LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
137 | laio_io_plug(bs, aio); | ||
138 | } | ||
139 | #endif | ||
140 | +#ifdef CONFIG_LINUX_IO_URING | ||
141 | + if (s->use_linux_io_uring) { | ||
142 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
143 | + luring_io_plug(bs, aio); | ||
144 | + } | ||
145 | +#endif | ||
146 | } | ||
147 | |||
148 | static void raw_aio_unplug(BlockDriverState *bs) | ||
149 | { | ||
150 | + BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
151 | #ifdef CONFIG_LINUX_AIO | ||
152 | - BDRVRawState *s = bs->opaque; | ||
153 | if (s->use_linux_aio) { | ||
154 | LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); | ||
155 | laio_io_unplug(bs, aio); | ||
156 | } | ||
157 | #endif | ||
158 | +#ifdef CONFIG_LINUX_IO_URING | ||
159 | + if (s->use_linux_io_uring) { | ||
160 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
161 | + luring_io_unplug(bs, aio); | ||
162 | + } | ||
163 | +#endif | ||
164 | } | ||
165 | |||
166 | static int raw_co_flush_to_disk(BlockDriverState *bs) | ||
167 | @@ -XXX,XX +XXX,XX @@ static int raw_co_flush_to_disk(BlockDriverState *bs) | ||
168 | .aio_type = QEMU_AIO_FLUSH, | ||
169 | }; | ||
170 | |||
171 | +#ifdef CONFIG_LINUX_IO_URING | ||
172 | + if (s->use_linux_io_uring) { | ||
173 | + LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs)); | ||
174 | + return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH); | ||
175 | + } | ||
176 | +#endif | ||
177 | return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb); | ||
178 | } | ||
179 | |||
180 | static void raw_aio_attach_aio_context(BlockDriverState *bs, | ||
181 | AioContext *new_context) | ||
182 | { | ||
183 | + BDRVRawState __attribute__((unused)) *s = bs->opaque; | ||
184 | #ifdef CONFIG_LINUX_AIO | ||
185 | - BDRVRawState *s = bs->opaque; | ||
186 | if (s->use_linux_aio) { | ||
187 | Error *local_err = NULL; | ||
188 | if (!aio_setup_linux_aio(new_context, &local_err)) { | ||
189 | @@ -XXX,XX +XXX,XX @@ static void raw_aio_attach_aio_context(BlockDriverState *bs, | ||
190 | } | ||
191 | } | ||
192 | #endif | ||
193 | +#ifdef CONFIG_LINUX_IO_URING | ||
194 | + if (s->use_linux_io_uring) { | ||
195 | + Error *local_err; | ||
196 | + if (!aio_setup_linux_io_uring(new_context, &local_err)) { | ||
197 | + error_reportf_err(local_err, "Unable to use linux io_uring, " | ||
198 | + "falling back to thread pool: "); | ||
199 | + s->use_linux_io_uring = false; | ||
200 | + } | ||
201 | + } | ||
202 | +#endif | ||
203 | } | ||
204 | |||
205 | static void raw_close(BlockDriverState *bs) | ||
206 | -- | 74 | -- |
207 | 2.24.1 | 75 | 2.26.2 |
208 | 76 | ||
209 | diff view generated by jsdifflib |
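virtio-crypto takes the simpler route described in the commit message: duplicate the scatter-gather arrays and let the destructive iov_discard_*() calls operate on the copies. A sketch of that idiom (the g_autofree/g_memdup lines mirror the diff; elem and the later discard calls are assumed context):

    g_autofree struct iovec *out_iov_copy = NULL;   /* freed when it goes out of scope */
    struct iovec *out_iov;
    unsigned int out_num = elem->out_num;

    out_iov_copy = g_memdup(elem->out_sg, sizeof(out_iov[0]) * out_num);
    out_iov = out_iov_copy;                /* discards now edit the copy only */

    /* ... iov_discard_front(&out_iov, &out_num, sizeof(hdr)) etc. ... */

    /* elem->out_sg is untouched, so virtqueue_unmap_sg() later unmaps exactly
     * what dma_memory_map() mapped */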
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | The sentence explaining the deprecation schedule is ambiguous. Make it |
---|---|---|---|
2 | clear that a feature deprecated in the Nth release is guaranteed to | ||
3 | remain available in the N+1th release. Removal can occur in the N+2nd | ||
4 | release or later. | ||
2 | 5 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 6 | As an example of this in action, see commit |
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | 7 | 25956af3fe5dd0385ad8017bc768a6afe41e2a74 ("block: Finish deprecation of |
8 | 'qemu-img convert -n -o'"). The feature was deprecated in QEMU 4.2.0. It | ||
9 | was present in the 5.0.0 release and removed in the 5.1.0 release. | ||
10 | |||
11 | Suggested-by: Daniel P. Berrangé <berrange@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | Message-id: 20200120141858.587874-16-stefanha@redhat.com | 13 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
7 | Message-Id: <20200120141858.587874-16-stefanha@redhat.com> | 14 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> |
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 15 | Message-Id: <20200915150734.711426-1-stefanha@redhat.com> |
9 | --- | 16 | --- |
10 | tests/qemu-iotests/028 | 2 +- | 17 | docs/system/deprecated.rst | 9 +++++---- |
11 | tests/qemu-iotests/058 | 2 +- | 18 | 1 file changed, 5 insertions(+), 4 deletions(-) |
12 | tests/qemu-iotests/089 | 4 ++-- | ||
13 | tests/qemu-iotests/091 | 4 ++-- | ||
14 | tests/qemu-iotests/109 | 2 +- | ||
15 | tests/qemu-iotests/147 | 5 +++-- | ||
16 | tests/qemu-iotests/181 | 8 ++++---- | ||
17 | tests/qemu-iotests/183 | 4 ++-- | ||
18 | tests/qemu-iotests/185 | 10 +++++----- | ||
19 | tests/qemu-iotests/200 | 2 +- | ||
20 | tests/qemu-iotests/201 | 8 ++++---- | ||
21 | 11 files changed, 26 insertions(+), 25 deletions(-) | ||
22 | 19 | ||
23 | diff --git a/tests/qemu-iotests/028 b/tests/qemu-iotests/028 | 20 | diff --git a/docs/system/deprecated.rst b/docs/system/deprecated.rst |
24 | index XXXXXXX..XXXXXXX 100755 | 21 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/tests/qemu-iotests/028 | 22 | --- a/docs/system/deprecated.rst |
26 | +++ b/tests/qemu-iotests/028 | 23 | +++ b/docs/system/deprecated.rst |
27 | @@ -XXX,XX +XXX,XX @@ echo block-backup | 24 | @@ -XXX,XX +XXX,XX @@ Deprecated features |
28 | echo | 25 | |
29 | 26 | In general features are intended to be supported indefinitely once | |
30 | qemu_comm_method="monitor" | 27 | introduced into QEMU. In the event that a feature needs to be removed, |
31 | -_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},id=disk | 28 | -it will be listed in this section. The feature will remain functional |
32 | +_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=${AIOMODE},id=disk | 29 | -for 2 releases prior to actual removal. Deprecated features may also |
33 | h=$QEMU_HANDLE | 30 | -generate warnings on the console when QEMU starts up, or if activated |
34 | if [ "${VALGRIND_QEMU}" == "y" ]; then | 31 | -via a monitor command, however, this is not a mandatory requirement. |
35 | QEMU_COMM_TIMEOUT=7 | 32 | +it will be listed in this section. The feature will remain functional for the |
36 | diff --git a/tests/qemu-iotests/058 b/tests/qemu-iotests/058 | 33 | +release in which it was deprecated and one further release. After these two |
37 | index XXXXXXX..XXXXXXX 100755 | 34 | +releases, the feature is liable to be removed. Deprecated features may also |
38 | --- a/tests/qemu-iotests/058 | 35 | +generate warnings on the console when QEMU starts up, or if activated via a |
39 | +++ b/tests/qemu-iotests/058 | 36 | +monitor command, however, this is not a mandatory requirement. |
40 | @@ -XXX,XX +XXX,XX @@ nbd_snapshot_img="nbd:unix:$nbd_unix_socket" | 37 | |
41 | converted_image=$TEST_IMG.converted | 38 | Prior to the 2.10.0 release there was no official policy on how |
42 | 39 | long features would be deprecated prior to their removal, nor | |
43 | # Use -f raw instead of -f $IMGFMT for the NBD connection | ||
44 | -QEMU_IO_NBD="$QEMU_IO -f raw --cache=$CACHEMODE" | ||
45 | +QEMU_IO_NBD="$QEMU_IO -f raw --cache=$CACHEMODE --aio=$AIOMODE" | ||
46 | |||
47 | echo | ||
48 | echo "== preparing image ==" | ||
49 | diff --git a/tests/qemu-iotests/089 b/tests/qemu-iotests/089 | ||
50 | index XXXXXXX..XXXXXXX 100755 | ||
51 | --- a/tests/qemu-iotests/089 | ||
52 | +++ b/tests/qemu-iotests/089 | ||
53 | @@ -XXX,XX +XXX,XX @@ $QEMU_IO -c 'write -P 42 0 512' -c 'write -P 23 512 512' \ | ||
54 | |||
55 | $QEMU_IMG convert -f raw -O $IMGFMT "$TEST_IMG.base" "$TEST_IMG" | ||
56 | |||
57 | -$QEMU_IO_PROG --cache $CACHEMODE \ | ||
58 | +$QEMU_IO_PROG --cache $CACHEMODE --aio $AIOMODE \ | ||
59 | -c 'read -P 42 0 512' -c 'read -P 23 512 512' \ | ||
60 | -c 'read -P 66 1024 512' "json:{ | ||
61 | \"driver\": \"$IMGFMT\", | ||
62 | @@ -XXX,XX +XXX,XX @@ $QEMU_IO -c 'write -P 42 0x38000 512' "$TEST_IMG" | _filter_qemu_io | ||
63 | |||
64 | # The "image.filename" part tests whether "a": { "b": "c" } and "a.b": "c" do | ||
65 | # the same (which they should). | ||
66 | -$QEMU_IO_PROG --cache $CACHEMODE \ | ||
67 | +$QEMU_IO_PROG --cache $CACHEMODE --aio $AIOMODE \ | ||
68 | -c 'read -P 42 0x38000 512' "json:{ | ||
69 | \"driver\": \"$IMGFMT\", | ||
70 | \"file\": { | ||
71 | diff --git a/tests/qemu-iotests/091 b/tests/qemu-iotests/091 | ||
72 | index XXXXXXX..XXXXXXX 100755 | ||
73 | --- a/tests/qemu-iotests/091 | ||
74 | +++ b/tests/qemu-iotests/091 | ||
75 | @@ -XXX,XX +XXX,XX @@ echo === Starting QEMU VM1 === | ||
76 | echo | ||
77 | |||
78 | qemu_comm_method="monitor" | ||
79 | -_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},id=disk | ||
80 | +_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=${AIOMODE},id=disk | ||
81 | h1=$QEMU_HANDLE | ||
82 | |||
83 | echo | ||
84 | echo === Starting QEMU VM2 === | ||
85 | echo | ||
86 | -_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},id=disk \ | ||
87 | +_launch_qemu -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=${AIOMODE},id=disk \ | ||
88 | -incoming "exec: cat '${MIG_FIFO}'" | ||
89 | h2=$QEMU_HANDLE | ||
90 | |||
91 | diff --git a/tests/qemu-iotests/109 b/tests/qemu-iotests/109 | ||
92 | index XXXXXXX..XXXXXXX 100755 | ||
93 | --- a/tests/qemu-iotests/109 | ||
94 | +++ b/tests/qemu-iotests/109 | ||
95 | @@ -XXX,XX +XXX,XX @@ run_qemu() | ||
96 | local qmp_format="$3" | ||
97 | local qmp_event="$4" | ||
98 | |||
99 | - _launch_qemu -drive file="${source_img}",format=raw,cache=${CACHEMODE},id=src | ||
100 | + _launch_qemu -drive file="${source_img}",format=raw,cache=${CACHEMODE},aio=${AIOMODE},id=src | ||
101 | _send_qemu_cmd $QEMU_HANDLE "{ 'execute': 'qmp_capabilities' }" "return" | ||
102 | |||
103 | _send_qemu_cmd $QEMU_HANDLE \ | ||
104 | diff --git a/tests/qemu-iotests/147 b/tests/qemu-iotests/147 | ||
105 | index XXXXXXX..XXXXXXX 100755 | ||
106 | --- a/tests/qemu-iotests/147 | ||
107 | +++ b/tests/qemu-iotests/147 | ||
108 | @@ -XXX,XX +XXX,XX @@ import socket | ||
109 | import stat | ||
110 | import time | ||
111 | import iotests | ||
112 | -from iotests import cachemode, imgfmt, qemu_img, qemu_nbd, qemu_nbd_early_pipe | ||
113 | +from iotests import cachemode, aiomode, imgfmt, qemu_img, qemu_nbd, qemu_nbd_early_pipe | ||
114 | |||
115 | NBD_PORT_START = 32768 | ||
116 | NBD_PORT_END = NBD_PORT_START + 1024 | ||
117 | @@ -XXX,XX +XXX,XX @@ class BuiltinNBD(NBDBlockdevAddBase): | ||
118 | self.server.add_drive_raw('if=none,id=nbd-export,' + | ||
119 | 'file=%s,' % test_img + | ||
120 | 'format=%s,' % imgfmt + | ||
121 | - 'cache=%s' % cachemode) | ||
122 | + 'cache=%s' % cachemode + | ||
123 | + 'aio=%s' % aiomode) | ||
124 | self.server.launch() | ||
125 | |||
126 | def tearDown(self): | ||
127 | diff --git a/tests/qemu-iotests/181 b/tests/qemu-iotests/181 | ||
128 | index XXXXXXX..XXXXXXX 100755 | ||
129 | --- a/tests/qemu-iotests/181 | ||
130 | +++ b/tests/qemu-iotests/181 | ||
131 | @@ -XXX,XX +XXX,XX @@ qemu_comm_method="monitor" | ||
132 | |||
133 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
134 | _launch_qemu \ | ||
135 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk | ||
136 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk | ||
137 | else | ||
138 | _launch_qemu \ | ||
139 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk | ||
140 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
141 | fi | ||
142 | src=$QEMU_HANDLE | ||
143 | |||
144 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
145 | _launch_qemu \ | ||
146 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk \ | ||
147 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk \ | ||
148 | -incoming "unix:${MIG_SOCKET}" | ||
149 | else | ||
150 | _launch_qemu \ | ||
151 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk \ | ||
152 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk \ | ||
153 | -incoming "unix:${MIG_SOCKET}" | ||
154 | fi | ||
155 | dest=$QEMU_HANDLE | ||
156 | diff --git a/tests/qemu-iotests/183 b/tests/qemu-iotests/183 | ||
157 | index XXXXXXX..XXXXXXX 100755 | ||
158 | --- a/tests/qemu-iotests/183 | ||
159 | +++ b/tests/qemu-iotests/183 | ||
160 | @@ -XXX,XX +XXX,XX @@ echo | ||
161 | qemu_comm_method="qmp" | ||
162 | |||
163 | _launch_qemu \ | ||
164 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
165 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
166 | src=$QEMU_HANDLE | ||
167 | _send_qemu_cmd $src "{ 'execute': 'qmp_capabilities' }" 'return' | ||
168 | |||
169 | _launch_qemu \ | ||
170 | - -drive file="${TEST_IMG}.dest",cache=$CACHEMODE,driver=$IMGFMT,id=disk \ | ||
171 | + -drive file="${TEST_IMG}.dest",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk \ | ||
172 | -incoming "unix:${MIG_SOCKET}" | ||
173 | dest=$QEMU_HANDLE | ||
174 | _send_qemu_cmd $dest "{ 'execute': 'qmp_capabilities' }" 'return' | ||
175 | diff --git a/tests/qemu-iotests/185 b/tests/qemu-iotests/185 | ||
176 | index XXXXXXX..XXXXXXX 100755 | ||
177 | --- a/tests/qemu-iotests/185 | ||
178 | +++ b/tests/qemu-iotests/185 | ||
179 | @@ -XXX,XX +XXX,XX @@ echo | ||
180 | qemu_comm_method="qmp" | ||
181 | |||
182 | _launch_qemu \ | ||
183 | - -drive file="${TEST_IMG}.base",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
184 | + -drive file="${TEST_IMG}.base",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
185 | h=$QEMU_HANDLE | ||
186 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
187 | |||
188 | @@ -XXX,XX +XXX,XX @@ echo === Start active commit job and exit qemu === | ||
189 | echo | ||
190 | |||
191 | _launch_qemu \ | ||
192 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
193 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
194 | h=$QEMU_HANDLE | ||
195 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
196 | |||
197 | @@ -XXX,XX +XXX,XX @@ echo === Start mirror job and exit qemu === | ||
198 | echo | ||
199 | |||
200 | _launch_qemu \ | ||
201 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
202 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
203 | h=$QEMU_HANDLE | ||
204 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
205 | |||
206 | @@ -XXX,XX +XXX,XX @@ echo === Start backup job and exit qemu === | ||
207 | echo | ||
208 | |||
209 | _launch_qemu \ | ||
210 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
211 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
212 | h=$QEMU_HANDLE | ||
213 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
214 | |||
215 | @@ -XXX,XX +XXX,XX @@ echo === Start streaming job and exit qemu === | ||
216 | echo | ||
217 | |||
218 | _launch_qemu \ | ||
219 | - -drive file="${TEST_IMG}",cache=$CACHEMODE,driver=$IMGFMT,id=disk | ||
220 | + -drive file="${TEST_IMG}",cache=$CACHEMODE,aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
221 | h=$QEMU_HANDLE | ||
222 | _send_qemu_cmd $h "{ 'execute': 'qmp_capabilities' }" 'return' | ||
223 | |||
224 | diff --git a/tests/qemu-iotests/200 b/tests/qemu-iotests/200 | ||
225 | index XXXXXXX..XXXXXXX 100755 | ||
226 | --- a/tests/qemu-iotests/200 | ||
227 | +++ b/tests/qemu-iotests/200 | ||
228 | @@ -XXX,XX +XXX,XX @@ echo === Starting QEMU VM === | ||
229 | echo | ||
230 | qemu_comm_method="qmp" | ||
231 | _launch_qemu -object iothread,id=iothread0 $virtio_scsi \ | ||
232 | - -drive file="${TEST_IMG}",media=disk,if=none,cache=$CACHEMODE,id=drive_sysdisk,format=$IMGFMT \ | ||
233 | + -drive file="${TEST_IMG}",media=disk,if=none,cache=$CACHEMODE,aio=$AIOMODE,id=drive_sysdisk,format=$IMGFMT \ | ||
234 | -device scsi-hd,drive=drive_sysdisk,bus=scsi0.0,id=sysdisk,bootindex=0 | ||
235 | h1=$QEMU_HANDLE | ||
236 | |||
237 | diff --git a/tests/qemu-iotests/201 b/tests/qemu-iotests/201 | ||
238 | index XXXXXXX..XXXXXXX 100755 | ||
239 | --- a/tests/qemu-iotests/201 | ||
240 | +++ b/tests/qemu-iotests/201 | ||
241 | @@ -XXX,XX +XXX,XX @@ qemu_comm_method="monitor" | ||
242 | |||
243 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
244 | _launch_qemu \ | ||
245 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk | ||
246 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk | ||
247 | else | ||
248 | _launch_qemu \ | ||
249 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk | ||
250 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk | ||
251 | fi | ||
252 | src=$QEMU_HANDLE | ||
253 | |||
254 | if [ "$IMGOPTSSYNTAX" = "true" ]; then | ||
255 | _launch_qemu \ | ||
256 | - -drive "${TEST_IMG}",cache=${CACHEMODE},id=disk \ | ||
257 | + -drive "${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,id=disk \ | ||
258 | -incoming "unix:${MIG_SOCKET}" | ||
259 | else | ||
260 | _launch_qemu \ | ||
261 | - -drive file="${TEST_IMG}",cache=${CACHEMODE},driver=$IMGFMT,id=disk \ | ||
262 | + -drive file="${TEST_IMG}",cache=${CACHEMODE},aio=$AIOMODE,driver=$IMGFMT,id=disk \ | ||
263 | -incoming "unix:${MIG_SOCKET}" | ||
264 | fi | ||
265 | dest=$QEMU_HANDLE | ||
266 | -- | 40 | -- |
267 | 2.24.1 | 41 | 2.26.2 |
268 | 42 | ||
269 | diff view generated by jsdifflib |
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | QEMU now hosts a mirror of qboot.git. QEMU mirrors third-party code to |
---|---|---|---|
2 | ensure that users can always build QEMU even if the dependency goes | ||
3 | offline and so QEMU meets its responsibilities to provide full source | ||
4 | code under software licenses. | ||
2 | 5 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 6 | Suggested-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 7 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | Message-id: 20200120141858.587874-15-stefanha@redhat.com | 8 | Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
7 | Message-Id: <20200120141858.587874-15-stefanha@redhat.com> | 9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 10 | Cc: Paolo Bonzini <pbonzini@redhat.com> |
11 | Message-Id: <20200915130834.706758-2-stefanha@redhat.com> | ||
9 | --- | 12 | --- |
10 | tests/qemu-iotests/check | 15 ++++++++++++++- | 13 | .gitmodules | 2 +- |
11 | tests/qemu-iotests/common.rc | 14 ++++++++++++++ | 14 | 1 file changed, 1 insertion(+), 1 deletion(-) |
12 | tests/qemu-iotests/iotests.py | 12 ++++++++++-- | ||
13 | 3 files changed, 38 insertions(+), 3 deletions(-) | ||
14 | 15 | ||
15 | diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check | 16 | diff --git a/.gitmodules b/.gitmodules |
16 | index XXXXXXX..XXXXXXX 100755 | ||
17 | --- a/tests/qemu-iotests/check | ||
18 | +++ b/tests/qemu-iotests/check | ||
19 | @@ -XXX,XX +XXX,XX @@ sortme=false | ||
20 | expunge=true | ||
21 | have_test_arg=false | ||
22 | cachemode=false | ||
23 | +aiomode=false | ||
24 | |||
25 | tmp="${TEST_DIR}"/$$ | ||
26 | rm -f $tmp.list $tmp.tmp $tmp.sed | ||
27 | @@ -XXX,XX +XXX,XX @@ export IMGFMT_GENERIC=true | ||
28 | export IMGPROTO=file | ||
29 | export IMGOPTS="" | ||
30 | export CACHEMODE="writeback" | ||
31 | +export AIOMODE="threads" | ||
32 | export QEMU_IO_OPTIONS="" | ||
33 | export QEMU_IO_OPTIONS_NO_FMT="" | ||
34 | export CACHEMODE_IS_DEFAULT=true | ||
35 | @@ -XXX,XX +XXX,XX @@ s/ .*//p | ||
36 | CACHEMODE_IS_DEFAULT=false | ||
37 | cachemode=false | ||
38 | continue | ||
39 | + elif $aiomode | ||
40 | + then | ||
41 | + AIOMODE="$r" | ||
42 | + aiomode=false | ||
43 | + continue | ||
44 | fi | ||
45 | |||
46 | xpand=true | ||
47 | @@ -XXX,XX +XXX,XX @@ other options | ||
48 | -n show me, do not run tests | ||
49 | -o options -o options to pass to qemu-img create/convert | ||
50 | -c mode cache mode | ||
51 | + -i mode AIO mode | ||
52 | -makecheck pretty print output for make check | ||
53 | |||
54 | testlist options | ||
55 | @@ -XXX,XX +XXX,XX @@ testlist options | ||
56 | cachemode=true | ||
57 | xpand=false | ||
58 | ;; | ||
59 | + -i) | ||
60 | + aiomode=true | ||
61 | + xpand=false | ||
62 | + ;; | ||
63 | -T) # deprecated timestamp option | ||
64 | xpand=false | ||
65 | ;; | ||
66 | - | ||
67 | -v) | ||
68 | verbose=true | ||
69 | xpand=false | ||
70 | @@ -XXX,XX +XXX,XX @@ done | ||
71 | |||
72 | # Set qemu-io cache mode with $CACHEMODE we have | ||
73 | QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS --cache $CACHEMODE" | ||
74 | +# Set qemu-io aio mode with $AIOMODE we have | ||
75 | +QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS --aio $AIOMODE" | ||
76 | |||
77 | QEMU_IO_OPTIONS_NO_FMT="$QEMU_IO_OPTIONS" | ||
78 | if [ "$IMGOPTSSYNTAX" != "true" ]; then | ||
79 | diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc | ||
80 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
81 | --- a/tests/qemu-iotests/common.rc | 18 | --- a/.gitmodules |
82 | +++ b/tests/qemu-iotests/common.rc | 19 | +++ b/.gitmodules |
83 | @@ -XXX,XX +XXX,XX @@ _default_cache_mode() | 20 | @@ -XXX,XX +XXX,XX @@ |
84 | return | 21 | url = https://git.qemu.org/git/opensbi.git |
85 | fi | 22 | [submodule "roms/qboot"] |
86 | } | 23 | path = roms/qboot |
87 | +_supported_aio_modes() | 24 | - url = https://github.com/bonzini/qboot |
88 | +{ | 25 | + url = https://git.qemu.org/git/qboot.git |
89 | + for mode; do | 26 | [submodule "meson"] |
90 | + if [ "$mode" = "$AIOMODE" ]; then | 27 | path = meson |
91 | + return | 28 | url = https://github.com/mesonbuild/meson/ |
92 | + fi | ||
93 | + done | ||
94 | + _notrun "not suitable for aio mode: $AIOMODE" | ||
95 | +} | ||
96 | +_default_aio_mode() | ||
97 | +{ | ||
98 | + AIOMODE="$1" | ||
99 | + QEMU_IO="$QEMU_IO --aio $1" | ||
100 | +} | ||
101 | |||
102 | _unsupported_imgopts() | ||
103 | { | ||
104 | diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py | ||
105 | index XXXXXXX..XXXXXXX 100644 | ||
106 | --- a/tests/qemu-iotests/iotests.py | ||
107 | +++ b/tests/qemu-iotests/iotests.py | ||
108 | @@ -XXX,XX +XXX,XX @@ test_dir = os.environ.get('TEST_DIR') | ||
109 | sock_dir = os.environ.get('SOCK_DIR') | ||
110 | output_dir = os.environ.get('OUTPUT_DIR', '.') | ||
111 | cachemode = os.environ.get('CACHEMODE') | ||
112 | +aiomode = os.environ.get('AIOMODE') | ||
113 | qemu_default_machine = os.environ.get('QEMU_DEFAULT_MACHINE') | ||
114 | |||
115 | socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper') | ||
116 | @@ -XXX,XX +XXX,XX @@ class VM(qtest.QEMUQtestMachine): | ||
117 | options.append('file=%s' % path) | ||
118 | options.append('format=%s' % format) | ||
119 | options.append('cache=%s' % cachemode) | ||
120 | + options.append('aio=%s' % aiomode) | ||
121 | |||
122 | if opts: | ||
123 | options.append(opts) | ||
124 | @@ -XXX,XX +XXX,XX @@ def verify_cache_mode(supported_cache_modes=[]): | ||
125 | if supported_cache_modes and (cachemode not in supported_cache_modes): | ||
126 | notrun('not suitable for this cache mode: %s' % cachemode) | ||
127 | |||
128 | +def verify_aio_mode(supported_aio_modes=[]): | ||
129 | + if supported_aio_modes and (aiomode not in supported_aio_modes): | ||
130 | + notrun('not suitable for this aio mode: %s' % aiomode) | ||
131 | + | ||
132 | def supports_quorum(): | ||
133 | return 'quorum' in qemu_img_pipe('--help') | ||
134 | |||
135 | @@ -XXX,XX +XXX,XX @@ def execute_unittest(output, verbosity, debug): | ||
136 | |||
137 | def execute_test(test_function=None, | ||
138 | supported_fmts=[], supported_oses=['linux'], | ||
139 | - supported_cache_modes=[], unsupported_fmts=[], | ||
140 | - supported_protocols=[], unsupported_protocols=[]): | ||
141 | + supported_cache_modes=[], supported_aio_modes={}, | ||
142 | + unsupported_fmts=[], supported_protocols=[], | ||
143 | + unsupported_protocols=[]): | ||
144 | """Run either unittest or script-style tests.""" | ||
145 | |||
146 | # We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to | ||
147 | @@ -XXX,XX +XXX,XX @@ def execute_test(test_function=None, | ||
148 | verify_protocol(supported_protocols, unsupported_protocols) | ||
149 | verify_platform(supported_oses) | ||
150 | verify_cache_mode(supported_cache_modes) | ||
151 | + verify_aio_mode(supported_aio_modes) | ||
152 | |||
153 | if debug: | ||
154 | output = sys.stdout | ||
155 | -- | 29 | -- |
156 | 2.24.1 | 30 | 2.26.2 |
157 | 31 | ||
158 | diff view generated by jsdifflib |
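With the check/-i plumbing above in place, an AIO engine can be selected per run just like a cache mode. A hypothetical invocation (format flag and test numbers chosen for illustration; io_uring needs a suitably built QEMU and a Linux 5.1+ kernel):

    cd tests/qemu-iotests
    ./check -qcow2 -c none -i io_uring 028 091

Individual tests opt in or out through the new hooks: _supported_aio_modes in shell tests and supported_aio_modes/verify_aio_mode() in the Python harness.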
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | QEMU now hosts a mirror of meson.git. QEMU mirrors third-party code to |
---|---|---|---|
2 | ensure that users can always build QEMU even if the dependency goes | ||
3 | offline and so QEMU meets its responsibilities to provide full source | ||
4 | code under software licenses. | ||
2 | 5 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 6 | Suggested-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Acked-by: Eric Blake <eblake@redhat.com> | ||
5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 7 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Message-id: 20200120141858.587874-14-stefanha@redhat.com | 8 | Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
8 | Message-Id: <20200120141858.587874-14-stefanha@redhat.com> | 9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 10 | Cc: Marc-André Lureau <marcandre.lureau@redhat.com> |
11 | Cc: Paolo Bonzini <pbonzini@redhat.com> | ||
12 | Message-Id: <20200915130834.706758-3-stefanha@redhat.com> | ||
10 | --- | 13 | --- |
11 | docs/interop/qemu-nbd.rst | 4 ++-- | 14 | .gitmodules | 2 +- |
12 | qemu-nbd.c | 12 ++++-------- | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
13 | 2 files changed, 6 insertions(+), 10 deletions(-) | ||
14 | 16 | ||
15 | diff --git a/docs/interop/qemu-nbd.rst b/docs/interop/qemu-nbd.rst | 17 | diff --git a/.gitmodules b/.gitmodules |
16 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/docs/interop/qemu-nbd.rst | 19 | --- a/.gitmodules |
18 | +++ b/docs/interop/qemu-nbd.rst | 20 | +++ b/.gitmodules |
19 | @@ -XXX,XX +XXX,XX @@ driver options if ``--image-opts`` is specified. | 21 | @@ -XXX,XX +XXX,XX @@ |
20 | 22 | url = https://git.qemu.org/git/qboot.git | |
21 | .. option:: --aio=AIO | 23 | [submodule "meson"] |
22 | 24 | path = meson | |
23 | - Set the asynchronous I/O mode between ``threads`` (the default) | 25 | - url = https://github.com/mesonbuild/meson/ |
24 | - and ``native`` (Linux only). | 26 | + url = https://git.qemu.org/git/meson.git |
25 | + Set the asynchronous I/O mode between ``threads`` (the default), | 27 | [submodule "roms/vbootrom"] |
26 | + ``native`` (Linux only), and ``io_uring`` (Linux 5.1+). | 28 | path = roms/vbootrom |
27 | 29 | url = https://github.com/google/vbootrom.git | |
28 | .. option:: --discard=DISCARD | ||
29 | |||
30 | diff --git a/qemu-nbd.c b/qemu-nbd.c | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/qemu-nbd.c | ||
33 | +++ b/qemu-nbd.c | ||
34 | @@ -XXX,XX +XXX,XX @@ static void usage(const char *name) | ||
35 | " '[ID_OR_NAME]'\n" | ||
36 | " -n, --nocache disable host cache\n" | ||
37 | " --cache=MODE set cache mode (none, writeback, ...)\n" | ||
38 | -" --aio=MODE set AIO mode (native or threads)\n" | ||
39 | +" --aio=MODE set AIO mode (native, io_uring or threads)\n" | ||
40 | " --discard=MODE set discard mode (ignore, unmap)\n" | ||
41 | " --detect-zeroes=MODE set detect-zeroes mode (off, on, unmap)\n" | ||
42 | " --image-opts treat FILE as a full set of image options\n" | ||
43 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
44 | exit(EXIT_FAILURE); | ||
45 | } | ||
46 | seen_aio = true; | ||
47 | - if (!strcmp(optarg, "native")) { | ||
48 | - flags |= BDRV_O_NATIVE_AIO; | ||
49 | - } else if (!strcmp(optarg, "threads")) { | ||
50 | - /* this is the default */ | ||
51 | - } else { | ||
52 | - error_report("invalid aio mode `%s'", optarg); | ||
53 | - exit(EXIT_FAILURE); | ||
54 | + if (bdrv_parse_aio(optarg, &flags) < 0) { | ||
55 | + error_report("Invalid aio mode '%s'", optarg); | ||
56 | + exit(EXIT_FAILURE); | ||
57 | } | ||
58 | break; | ||
59 | case QEMU_NBD_OPT_DISCARD: | ||
60 | -- | 30 | -- |
61 | 2.24.1 | 31 | 2.26.2 |
62 | 32 | ||
63 | diff view generated by jsdifflib |
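On the qemu-nbd side the new engine is picked exactly like the existing ones. A hypothetical invocation (the image path is a placeholder; requires a build with CONFIG_LINUX_IO_URING):

    qemu-nbd --aio=io_uring --cache=none -f qcow2 /path/to/image.qcow2

Because the option is now parsed by bdrv_parse_aio(), an unrecognized mode string is rejected with "Invalid aio mode ..." instead of being silently ignored.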
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | The vbootrom module is needed for the new NPCM7xx ARM SoCs. The |
---|---|---|---|
2 | vbootrom.git repo is now mirrored on qemu.org. QEMU mirrors third-party | ||
3 | code to ensure that users can always build QEMU even if the dependency | ||
4 | goes offline and so QEMU meets its responsibilities to provide full | ||
5 | source code under software licenses. | ||
2 | 6 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 7 | Suggested-by: Peter Maydell <peter.maydell@linaro.org> |
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | 8 | Cc: Havard Skinnemoen <hskinnemoen@google.com> |
9 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
10 | Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
6 | Message-id: 20200120141858.587874-13-stefanha@redhat.com | 12 | Message-Id: <20200915130834.706758-4-stefanha@redhat.com> |
7 | Message-Id: <20200120141858.587874-13-stefanha@redhat.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | 13 | --- |
10 | qemu-img-cmds.hx | 4 ++-- | 14 | .gitmodules | 2 +- |
11 | qemu-img.c | 11 ++++++++++- | 15 | 1 file changed, 1 insertion(+), 1 deletion(-) |
12 | qemu-img.texi | 5 ++++- | ||
13 | 3 files changed, 16 insertions(+), 4 deletions(-) | ||
14 | 16 | ||
15 | diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx | 17 | diff --git a/.gitmodules b/.gitmodules |
16 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/qemu-img-cmds.hx | 19 | --- a/.gitmodules |
18 | +++ b/qemu-img-cmds.hx | 20 | +++ b/.gitmodules |
19 | @@ -XXX,XX +XXX,XX @@ STEXI | 21 | @@ -XXX,XX +XXX,XX @@ |
20 | ETEXI | 22 | url = https://git.qemu.org/git/meson.git |
21 | 23 | [submodule "roms/vbootrom"] | |
22 | DEF("bench", img_bench, | 24 | path = roms/vbootrom |
23 | - "bench [-c count] [-d depth] [-f fmt] [--flush-interval=flush_interval] [-n] [--no-drain] [-o offset] [--pattern=pattern] [-q] [-s buffer_size] [-S step_size] [-t cache] [-w] [-U] filename") | 25 | - url = https://github.com/google/vbootrom.git |
24 | + "bench [-c count] [-d depth] [-f fmt] [--flush-interval=flush_interval] [-n] [--no-drain] [-o offset] [--pattern=pattern] [-q] [-s buffer_size] [-S step_size] [-t cache] [-i aio] [-w] [-U] filename") | 26 | + url = https://git.qemu.org/git/vbootrom.git |
25 | STEXI | ||
26 | -@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-w] [-U] @var{filename} | ||
27 | +@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-i @var{aio}] [-w] [-U] @var{filename} | ||
28 | ETEXI | ||
29 | |||
30 | DEF("check", img_check, | ||
31 | diff --git a/qemu-img.c b/qemu-img.c | ||
32 | index XXXXXXX..XXXXXXX 100644 | ||
33 | --- a/qemu-img.c | ||
34 | +++ b/qemu-img.c | ||
35 | @@ -XXX,XX +XXX,XX @@ static int img_bench(int argc, char **argv) | ||
36 | {"force-share", no_argument, 0, 'U'}, | ||
37 | {0, 0, 0, 0} | ||
38 | }; | ||
39 | - c = getopt_long(argc, argv, ":hc:d:f:no:qs:S:t:wU", long_options, NULL); | ||
40 | + c = getopt_long(argc, argv, ":hc:d:f:ni:o:qs:S:t:wU", long_options, | ||
41 | + NULL); | ||
42 | if (c == -1) { | ||
43 | break; | ||
44 | } | ||
45 | @@ -XXX,XX +XXX,XX @@ static int img_bench(int argc, char **argv) | ||
46 | case 'n': | ||
47 | flags |= BDRV_O_NATIVE_AIO; | ||
48 | break; | ||
49 | + case 'i': | ||
50 | + ret = bdrv_parse_aio(optarg, &flags); | ||
51 | + if (ret < 0) { | ||
52 | + error_report("Invalid aio option: %s", optarg); | ||
53 | + ret = -1; | ||
54 | + goto out; | ||
55 | + } | ||
56 | + break; | ||
57 | case 'o': | ||
58 | { | ||
59 | offset = cvtnum(optarg); | ||
60 | diff --git a/qemu-img.texi b/qemu-img.texi | ||
61 | index XXXXXXX..XXXXXXX 100644 | ||
62 | --- a/qemu-img.texi | ||
63 | +++ b/qemu-img.texi | ||
64 | @@ -XXX,XX +XXX,XX @@ Command description: | ||
65 | Amends the image format specific @var{options} for the image file | ||
66 | @var{filename}. Not all file formats support this operation. | ||
67 | |||
68 | -@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-w] [-U] @var{filename} | ||
69 | +@item bench [-c @var{count}] [-d @var{depth}] [-f @var{fmt}] [--flush-interval=@var{flush_interval}] [-n] [-i @var{aio}] [--no-drain] [-o @var{offset}] [--pattern=@var{pattern}] [-q] [-s @var{buffer_size}] [-S @var{step_size}] [-t @var{cache}] [-w] [-U] @var{filename} | ||
70 | |||
71 | Run a simple sequential I/O benchmark on the specified image. If @code{-w} is | ||
72 | specified, a write test is performed, otherwise a read test is performed. | ||
73 | @@ -XXX,XX +XXX,XX @@ If @code{-n} is specified, the native AIO backend is used if possible. On | ||
74 | Linux, this option only works if @code{-t none} or @code{-t directsync} is | ||
75 | specified as well. | ||
76 | |||
77 | +If @code{-i} is specified, aio option can be used to specify different AIO | ||
78 | +backends: @var{threads}, @var{native} or @var{io_uring}. | ||
79 | + | ||
80 | For write tests, by default a buffer filled with zeros is written. This can be | ||
81 | overridden with a pattern byte specified by @var{pattern}. | ||
82 | |||
83 | -- | 27 | -- |
84 | 2.24.1 | 28 | 2.26.2 |
85 | 29 | ||
86 | diff view generated by jsdifflib |
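A hypothetical qemu-img bench run with the new flag (image name, request count and sizes are placeholders; -t none mirrors the cache-mode constraint the texi text documents for -n):

    qemu-img bench -w -t none -i io_uring -c 100000 -d 64 -s 65536 test.img

-i takes the same strings as the block layer's aio= option: threads, native or io_uring.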
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | npfd keeps track of how many pollfds are currently being monitored. It |
---|---|---|---|
2 | must be reset to 0 when fdmon_poll_wait() returns. | ||
2 | 3 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | 4 | When npfd reaches a threshold, we switch to fdmon-epoll because it scales |
4 | Reviewed-by: Maxim Levitsky <maximlevitsky@gmail.com> | 5 | better. |
5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | 6 | |
7 | This patch resets npfd in the case where we switch to fdmon-epoll. | ||
8 | Forgetting to do so results in the following assertion failure: | ||
9 | |||
10 | util/fdmon-poll.c:65: fdmon_poll_wait: Assertion `npfd == 0' failed. | ||
11 | |||
12 | Fixes: 1f050a4690f62a1e7dabc4f44141e9f762c3769f ("aio-posix: extract ppoll(2) and epoll(7) fd monitoring") | ||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Message-id: 20200120141858.587874-4-stefanha@redhat.com | 14 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> |
8 | Message-Id: <20200120141858.587874-4-stefanha@redhat.com> | 15 | Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=1869952 |
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 16 | Message-Id: <20200915120339.702938-2-stefanha@redhat.com> |
10 | --- | 17 | --- |
11 | include/block/block.h | 1 + | 18 | util/fdmon-poll.c | 1 + |
12 | 1 file changed, 1 insertion(+) | 19 | 1 file changed, 1 insertion(+) |
13 | 20 | ||
14 | diff --git a/include/block/block.h b/include/block/block.h | 21 | diff --git a/util/fdmon-poll.c b/util/fdmon-poll.c |
15 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/include/block/block.h | 23 | --- a/util/fdmon-poll.c |
17 | +++ b/include/block/block.h | 24 | +++ b/util/fdmon-poll.c |
18 | @@ -XXX,XX +XXX,XX @@ typedef struct HDGeometry { | 25 | @@ -XXX,XX +XXX,XX @@ static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list, |
19 | ignoring the format layer */ | 26 | |
20 | #define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */ | 27 | /* epoll(7) is faster above a certain number of fds */ |
21 | #define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */ | 28 | if (fdmon_epoll_try_upgrade(ctx, npfd)) { |
22 | +#define BDRV_O_IO_URING 0x40000 /* use io_uring instead of the thread pool */ | 29 | + npfd = 0; /* we won't need pollfds[], reset npfd */ |
23 | 30 | return ctx->fdmon_ops->wait(ctx, ready_list, timeout); | |
24 | #define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH) | 31 | } |
25 | 32 | ||
26 | -- | 33 | -- |
27 | 2.24.1 | 34 | 2.26.2 |
28 | 35 | ||
29 | diff view generated by jsdifflib |
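The fdmon-poll fix preserves a simple invariant: every entry to fdmon_poll_wait() starts with npfd == 0. A condensed view of the fixed control flow (only the function signature, the upgrade check and the added reset come from the patch; the rest is assumed context):

    static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
                               int64_t timeout)
    {
        assert(npfd == 0);   /* the assertion quoted in the commit message */

        /* ... populate pollfds[]/npfd from the registered handlers ... */

        if (fdmon_epoll_try_upgrade(ctx, npfd)) {
            npfd = 0;        /* this path never consumes pollfds[] */
            return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
        }

        /* ... ppoll(pollfds, npfd, ...), dispatch, then reset npfd ... */
    }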
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | 1 | Test aio_disable_external(), which switches from fdmon-epoll back to |
---|---|---|---|
2 | fdmon-poll. This resulted in an assertion failure that was fixed in the | ||
3 | previous patch. | ||
2 | 4 | ||
3 | Aborts when sqe fails to be set as sqes cannot be returned to the | ||
4 | ring. Adds slow path for short reads for older kernels | ||
5 | |||
6 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
7 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
9 | Message-id: 20200120141858.587874-5-stefanha@redhat.com | 6 | Message-Id: <20200915120339.702938-3-stefanha@redhat.com> |
10 | Message-Id: <20200120141858.587874-5-stefanha@redhat.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | 7 | --- |
13 | MAINTAINERS | 8 + | 8 | MAINTAINERS | 1 + |
14 | block/Makefile.objs | 3 + | 9 | tests/test-fdmon-epoll.c | 73 ++++++++++++++++++++++++++++++++++++++++ |
15 | block/io_uring.c | 401 ++++++++++++++++++++++++++++++++++++++++ | 10 | tests/meson.build | 3 ++ |
16 | include/block/aio.h | 16 +- | 11 | 3 files changed, 77 insertions(+) |
17 | include/block/raw-aio.h | 12 ++ | 12 | create mode 100644 tests/test-fdmon-epoll.c |
18 | 5 files changed, 439 insertions(+), 1 deletion(-) | ||
19 | create mode 100644 block/io_uring.c | ||
20 | 13 | ||
21 | diff --git a/MAINTAINERS b/MAINTAINERS | 14 | diff --git a/MAINTAINERS b/MAINTAINERS |
22 | index XXXXXXX..XXXXXXX 100644 | 15 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/MAINTAINERS | 16 | --- a/MAINTAINERS |
24 | +++ b/MAINTAINERS | 17 | +++ b/MAINTAINERS |
25 | @@ -XXX,XX +XXX,XX @@ F: block/file-posix.c | 18 | @@ -XXX,XX +XXX,XX @@ F: migration/block* |
26 | F: block/file-win32.c | 19 | F: include/block/aio.h |
27 | F: block/win32-aio.c | 20 | F: include/block/aio-wait.h |
28 | 21 | F: scripts/qemugdb/aio.py | |
29 | +Linux io_uring | 22 | +F: tests/test-fdmon-epoll.c |
30 | +M: Aarushi Mehta <mehta.aaru20@gmail.com> | 23 | T: git https://github.com/stefanha/qemu.git block |
31 | +M: Julia Suvorova <jusual@redhat.com> | 24 | |
32 | +M: Stefan Hajnoczi <stefanha@redhat.com> | 25 | Block SCSI subsystem |
33 | +L: qemu-block@nongnu.org | 26 | diff --git a/tests/test-fdmon-epoll.c b/tests/test-fdmon-epoll.c |
34 | +S: Maintained | ||
35 | +F: block/io_uring.c | ||
36 | + | ||
37 | qcow2 | ||
38 | M: Kevin Wolf <kwolf@redhat.com> | ||
39 | M: Max Reitz <mreitz@redhat.com> | ||
40 | diff --git a/block/Makefile.objs b/block/Makefile.objs | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/block/Makefile.objs | ||
43 | +++ b/block/Makefile.objs | ||
44 | @@ -XXX,XX +XXX,XX @@ block-obj-y += block-backend.o snapshot.o qapi.o | ||
45 | block-obj-$(CONFIG_WIN32) += file-win32.o win32-aio.o | ||
46 | block-obj-$(CONFIG_POSIX) += file-posix.o | ||
47 | block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o | ||
48 | +block-obj-$(CONFIG_LINUX_IO_URING) += io_uring.o | ||
49 | block-obj-y += null.o mirror.o commit.o io.o create.o | ||
50 | block-obj-y += throttle-groups.o | ||
51 | block-obj-$(CONFIG_LINUX) += nvme.o | ||
52 | @@ -XXX,XX +XXX,XX @@ block-obj-$(if $(CONFIG_LZFSE),m,n) += dmg-lzfse.o | ||
53 | dmg-lzfse.o-libs := $(LZFSE_LIBS) | ||
54 | qcow.o-libs := -lz | ||
55 | linux-aio.o-libs := -laio | ||
56 | +io_uring.o-cflags := $(LINUX_IO_URING_CFLAGS) | ||
57 | +io_uring.o-libs := $(LINUX_IO_URING_LIBS) | ||
58 | parallels.o-cflags := $(LIBXML2_CFLAGS) | ||
59 | parallels.o-libs := $(LIBXML2_LIBS) | ||
60 | diff --git a/block/io_uring.c b/block/io_uring.c | ||
61 | new file mode 100644 | 27 | new file mode 100644 |
62 | index XXXXXXX..XXXXXXX | 28 | index XXXXXXX..XXXXXXX |
63 | --- /dev/null | 29 | --- /dev/null |
64 | +++ b/block/io_uring.c | 30 | +++ b/tests/test-fdmon-epoll.c |
65 | @@ -XXX,XX +XXX,XX @@ | 31 | @@ -XXX,XX +XXX,XX @@ |
32 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
66 | +/* | 33 | +/* |
67 | + * Linux io_uring support. | 34 | + * fdmon-epoll tests |
68 | + * | 35 | + * |
69 | + * Copyright (C) 2009 IBM, Corp. | 36 | + * Copyright (c) 2020 Red Hat, Inc. |
70 | + * Copyright (C) 2009 Red Hat, Inc. | ||
71 | + * Copyright (C) 2019 Aarushi Mehta | ||
72 | + * | ||
73 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
74 | + * See the COPYING file in the top-level directory. | ||
75 | + */ | 37 | + */ |
38 | + | ||
76 | +#include "qemu/osdep.h" | 39 | +#include "qemu/osdep.h" |
77 | +#include <liburing.h> | ||
78 | +#include "qemu-common.h" | ||
79 | +#include "block/aio.h" | 40 | +#include "block/aio.h" |
80 | +#include "qemu/queue.h" | ||
81 | +#include "block/block.h" | ||
82 | +#include "block/raw-aio.h" | ||
83 | +#include "qemu/coroutine.h" | ||
84 | +#include "qapi/error.h" | 41 | +#include "qapi/error.h" |
42 | +#include "qemu/main-loop.h" | ||
85 | + | 43 | + |
86 | +/* io_uring ring size */ | 44 | +static AioContext *ctx; |
87 | +#define MAX_ENTRIES 128 | ||
88 | + | 45 | + |
89 | +typedef struct LuringAIOCB { | 46 | +static void dummy_fd_handler(EventNotifier *notifier) |
90 | + Coroutine *co; | ||
91 | + struct io_uring_sqe sqeq; | ||
92 | + ssize_t ret; | ||
93 | + QEMUIOVector *qiov; | ||
94 | + bool is_read; | ||
95 | + QSIMPLEQ_ENTRY(LuringAIOCB) next; | ||
96 | + | ||
97 | + /* | ||
98 | + * Buffered reads may require resubmission, see | ||
99 | + * luring_resubmit_short_read(). | ||
100 | + */ | ||
101 | + int total_read; | ||
102 | + QEMUIOVector resubmit_qiov; | ||
103 | +} LuringAIOCB; | ||
104 | + | ||
105 | +typedef struct LuringQueue { | ||
106 | + int plugged; | ||
107 | + unsigned int in_queue; | ||
108 | + unsigned int in_flight; | ||
109 | + bool blocked; | ||
110 | + QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue; | ||
111 | +} LuringQueue; | ||
112 | + | ||
113 | +typedef struct LuringState { | ||
114 | + AioContext *aio_context; | ||
115 | + | ||
116 | + struct io_uring ring; | ||
117 | + | ||
118 | +    /* I/O queue for batched submission. Protected by AioContext lock. */ | |
119 | + LuringQueue io_q; | ||
120 | + | ||
121 | + /* I/O completion processing. Only runs in I/O thread. */ | ||
122 | + QEMUBH *completion_bh; | ||
123 | +} LuringState; | ||
124 | + | ||
125 | +/** | ||
126 | + * luring_resubmit: | ||
127 | + * | ||
128 | + * Resubmit a request by appending it to submit_queue. The caller must ensure | ||
129 | + * that ioq_submit() is called later so that submit_queue requests are started. | ||
130 | + */ | ||
131 | +static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb) | ||
132 | +{ | 47 | +{ |
133 | + QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next); | 48 | + event_notifier_test_and_clear(notifier); |
134 | + s->io_q.in_queue++; | ||
135 | +} | 49 | +} |
136 | + | 50 | + |
137 | +/** | 51 | +static void add_event_notifiers(EventNotifier *notifiers, size_t n) |
138 | + * luring_resubmit_short_read: | ||
139 | + * | ||
140 | + * Before Linux commit 9d93a3f5a0c ("io_uring: punt short reads to async | ||
141 | + * context") a buffered I/O request with the start of the file range in the | ||
142 | + * page cache could result in a short read. Applications need to resubmit the | ||
143 | + * remaining read request. | ||
144 | + * | ||
145 | + * This is a slow path but recent kernels never take it. | ||
146 | + */ | ||
147 | +static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, | ||
148 | + int nread) | ||
149 | +{ | 52 | +{ |
150 | + QEMUIOVector *resubmit_qiov; | 53 | + for (size_t i = 0; i < n; i++) { |
151 | + size_t remaining; | 54 | + event_notifier_init(¬ifiers[i], false); |
152 | + | 55 | + aio_set_event_notifier(ctx, ¬ifiers[i], false, |
153 | + /* Update read position */ | 56 | + dummy_fd_handler, NULL); |
154 | + luringcb->total_read = nread; | ||
155 | + remaining = luringcb->qiov->size - luringcb->total_read; | ||
156 | + | ||
157 | + /* Shorten qiov */ | ||
158 | + resubmit_qiov = &luringcb->resubmit_qiov; | ||
159 | + if (resubmit_qiov->iov == NULL) { | ||
160 | + qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov); | ||
161 | + } else { | ||
162 | + qemu_iovec_reset(resubmit_qiov); | ||
163 | + } | ||
164 | + qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read, | ||
165 | + remaining); | ||
166 | + | ||
167 | + /* Update sqe */ | ||
168 | + luringcb->sqeq.off = nread; | ||
169 | + luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov; | ||
170 | + luringcb->sqeq.len = luringcb->resubmit_qiov.niov; | ||
171 | + | ||
172 | + luring_resubmit(s, luringcb); | ||
173 | +} | ||
174 | + | ||
175 | +/** | ||
176 | + * luring_process_completions: | ||
177 | + * @s: AIO state | ||
178 | + * | ||
179 | + * Fetches completed I/O requests, consumes cqes and invokes their callbacks. | |
180 | + * The function is somewhat tricky because it supports nested event loops, for | ||
181 | + * example when a request callback invokes aio_poll(). | ||
182 | + * | ||
183 | + * The function schedules BH completion so it can be called again in a nested | |
184 | + * event loop. When there are no events left to complete, the BH is | |
185 | + * canceled. | |
186 | + * | ||
187 | + */ | ||
188 | +static void luring_process_completions(LuringState *s) | ||
189 | +{ | ||
190 | + struct io_uring_cqe *cqes; | ||
191 | + int total_bytes; | ||
192 | + /* | ||
193 | + * Request completion callbacks can run the nested event loop. | ||
194 | + * Schedule ourselves so the nested event loop will "see" remaining | ||
195 | + * completed requests and process them. Without this, completion | ||
196 | + * callbacks that wait for other requests using a nested event loop | ||
197 | + * would hang forever. | ||
198 | + * | ||
199 | + * This workaround is needed because io_uring uses poll_wait, which | ||
200 | + * is woken up when new events are added to the uring, thus polling on | ||
201 | + * the same uring fd will block unless more events are received. | ||
202 | + * | ||
203 | + * Other leaf block drivers (drivers that access the data themselves) | ||
204 | + * are networking based, so they poll sockets for data and run the | ||
205 | + * correct coroutine. | ||
206 | + */ | ||
207 | + qemu_bh_schedule(s->completion_bh); | ||
208 | + | ||
209 | + while (io_uring_peek_cqe(&s->ring, &cqes) == 0) { | ||
210 | + LuringAIOCB *luringcb; | ||
211 | + int ret; | ||
212 | + | ||
213 | + if (!cqes) { | ||
214 | + break; | ||
215 | + } | ||
216 | + | ||
217 | + luringcb = io_uring_cqe_get_data(cqes); | ||
218 | + ret = cqes->res; | ||
219 | + io_uring_cqe_seen(&s->ring, cqes); | ||
220 | + cqes = NULL; | ||
221 | + | ||
222 | + /* Change counters one-by-one because we can be nested. */ | ||
223 | + s->io_q.in_flight--; | ||
224 | + | ||
225 | + /* total_read is non-zero only for resubmitted read requests */ | ||
226 | + total_bytes = ret + luringcb->total_read; | ||
227 | + | ||
228 | + if (ret < 0) { | ||
229 | + if (ret == -EINTR) { | ||
230 | + luring_resubmit(s, luringcb); | ||
231 | + continue; | ||
232 | + } | ||
233 | + } else if (!luringcb->qiov) { | ||
234 | + goto end; | ||
235 | + } else if (total_bytes == luringcb->qiov->size) { | ||
236 | + ret = 0; | ||
237 | + /* Only read/write */ | ||
238 | + } else { | ||
239 | + /* Short Read/Write */ | ||
240 | + if (luringcb->is_read) { | ||
241 | + if (ret > 0) { | ||
242 | + luring_resubmit_short_read(s, luringcb, ret); | ||
243 | + continue; | ||
244 | + } else { | ||
245 | + /* Pad with zeroes */ | ||
246 | + qemu_iovec_memset(luringcb->qiov, total_bytes, 0, | ||
247 | + luringcb->qiov->size - total_bytes); | ||
248 | + ret = 0; | ||
249 | + } | ||
250 | + } else { | ||
251 | +                ret = -ENOSPC; | |
252 | + } | ||
253 | + } | ||
254 | +end: | ||
255 | + luringcb->ret = ret; | ||
256 | + qemu_iovec_destroy(&luringcb->resubmit_qiov); | ||
257 | + | ||
258 | + /* | ||
259 | + * If the coroutine is already entered it must be in ioq_submit() | ||
260 | + * and will notice luringcb->ret has been filled in when it | ||
261 | + * eventually runs later. Coroutines cannot be entered recursively | ||
262 | + * so avoid doing that! | ||
263 | + */ | ||
264 | + if (!qemu_coroutine_entered(luringcb->co)) { | ||
265 | + aio_co_wake(luringcb->co); | ||
266 | + } | ||
267 | + } | ||
268 | + qemu_bh_cancel(s->completion_bh); | ||
269 | +} | ||
270 | + | ||
271 | +static int ioq_submit(LuringState *s) | ||
272 | +{ | ||
273 | + int ret = 0; | ||
274 | + LuringAIOCB *luringcb, *luringcb_next; | ||
275 | + | ||
276 | + while (s->io_q.in_queue > 0) { | ||
277 | + /* | ||
278 | + * Try to fetch sqes from the ring for requests waiting in | ||
279 | + * the overflow queue | ||
280 | + */ | ||
281 | + QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next, | ||
282 | + luringcb_next) { | ||
283 | + struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring); | ||
284 | + if (!sqes) { | ||
285 | + break; | ||
286 | + } | ||
287 | + /* Prep sqe for submission */ | ||
288 | + *sqes = luringcb->sqeq; | ||
289 | + QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next); | ||
290 | + } | ||
291 | + ret = io_uring_submit(&s->ring); | ||
292 | + /* Prevent infinite loop if submission is refused */ | ||
293 | + if (ret <= 0) { | ||
294 | + if (ret == -EAGAIN) { | ||
295 | + continue; | ||
296 | + } | ||
297 | + break; | ||
298 | + } | ||
299 | + s->io_q.in_flight += ret; | ||
300 | + s->io_q.in_queue -= ret; | ||
301 | + } | ||
302 | + s->io_q.blocked = (s->io_q.in_queue > 0); | ||
303 | + | ||
304 | + if (s->io_q.in_flight) { | ||
305 | + /* | ||
306 | +     * We can try to complete something right away if there are | |
307 | +     * still requests in flight. | |
308 | + */ | ||
309 | + luring_process_completions(s); | ||
310 | + } | ||
311 | + return ret; | ||
312 | +} | ||
313 | + | ||
314 | +static void luring_process_completions_and_submit(LuringState *s) | ||
315 | +{ | ||
316 | + aio_context_acquire(s->aio_context); | ||
317 | + luring_process_completions(s); | ||
318 | + | ||
319 | + if (!s->io_q.plugged && s->io_q.in_queue > 0) { | ||
320 | + ioq_submit(s); | ||
321 | + } | ||
322 | + aio_context_release(s->aio_context); | ||
323 | +} | ||
324 | + | ||
325 | +static void qemu_luring_completion_bh(void *opaque) | ||
326 | +{ | ||
327 | + LuringState *s = opaque; | ||
328 | + luring_process_completions_and_submit(s); | ||
329 | +} | ||
330 | + | ||
331 | +static void qemu_luring_completion_cb(void *opaque) | ||
332 | +{ | ||
333 | + LuringState *s = opaque; | ||
334 | + luring_process_completions_and_submit(s); | ||
335 | +} | ||
336 | + | ||
337 | +static void ioq_init(LuringQueue *io_q) | ||
338 | +{ | ||
339 | + QSIMPLEQ_INIT(&io_q->submit_queue); | ||
340 | + io_q->plugged = 0; | ||
341 | + io_q->in_queue = 0; | ||
342 | + io_q->in_flight = 0; | ||
343 | + io_q->blocked = false; | ||
344 | +} | ||
345 | + | ||
346 | +void luring_io_plug(BlockDriverState *bs, LuringState *s) | ||
347 | +{ | ||
348 | + s->io_q.plugged++; | ||
349 | +} | ||
350 | + | ||
351 | +void luring_io_unplug(BlockDriverState *bs, LuringState *s) | ||
352 | +{ | ||
353 | + assert(s->io_q.plugged); | ||
354 | + if (--s->io_q.plugged == 0 && | ||
355 | + !s->io_q.blocked && s->io_q.in_queue > 0) { | ||
356 | + ioq_submit(s); | ||
357 | + } | 57 | + } |
358 | +} | 58 | +} |
359 | + | 59 | + |
360 | +/** | 60 | +static void remove_event_notifiers(EventNotifier *notifiers, size_t n) |
361 | + * luring_do_submit: | ||
362 | + * @fd: file descriptor for I/O | ||
363 | + * @luringcb: AIO control block | ||
364 | + * @s: AIO state | ||
365 | + * @offset: offset for request | ||
366 | + * @type: type of request | ||
367 | + * | ||
368 | + * Preps the request's sqe, adds it to the pending queue and submits when possible | |
369 | + * | ||
370 | + */ | ||
371 | +static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, | ||
372 | + uint64_t offset, int type) | ||
373 | +{ | 61 | +{ |
374 | + struct io_uring_sqe *sqes = &luringcb->sqeq; | 62 | + for (size_t i = 0; i < n; i++) { |
375 | + | 63 | + aio_set_event_notifier(ctx, ¬ifiers[i], false, NULL, NULL); |
376 | + switch (type) { | 64 | + event_notifier_cleanup(¬ifiers[i]); |
377 | + case QEMU_AIO_WRITE: | ||
378 | + io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, | ||
379 | + luringcb->qiov->niov, offset); | ||
380 | + break; | ||
381 | + case QEMU_AIO_READ: | ||
382 | + io_uring_prep_readv(sqes, fd, luringcb->qiov->iov, | ||
383 | + luringcb->qiov->niov, offset); | ||
384 | + break; | ||
385 | + case QEMU_AIO_FLUSH: | ||
386 | + io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC); | ||
387 | + break; | ||
388 | + default: | ||
389 | + fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n", | ||
390 | + __func__, type); | ||
391 | + abort(); | ||
392 | + } | 65 | + } |
393 | + io_uring_sqe_set_data(sqes, luringcb); | ||
394 | + | ||
395 | + QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next); | ||
396 | + s->io_q.in_queue++; | ||
397 | + | ||
398 | + if (!s->io_q.blocked && | ||
399 | + (!s->io_q.plugged || | ||
400 | + s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) { | ||
401 | + return ioq_submit(s); | ||
402 | + } | ||
403 | + return 0; | ||
404 | +} | 66 | +} |
405 | + | 67 | + |
406 | +int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | 68 | +/* Check that fd handlers work when external clients are disabled */ |
407 | + uint64_t offset, QEMUIOVector *qiov, int type) | 69 | +static void test_external_disabled(void) |
408 | +{ | 70 | +{ |
409 | + int ret; | 71 | + EventNotifier notifiers[100]; |
410 | + LuringAIOCB luringcb = { | ||
411 | + .co = qemu_coroutine_self(), | ||
412 | + .ret = -EINPROGRESS, | ||
413 | + .qiov = qiov, | ||
414 | + .is_read = (type == QEMU_AIO_READ), | ||
415 | + }; | ||
416 | + | 72 | + |
417 | + ret = luring_do_submit(fd, &luringcb, s, offset, type); | 73 | + /* fdmon-epoll is only enabled when many fd handlers are registered */ |
418 | + if (ret < 0) { | 74 | + add_event_notifiers(notifiers, G_N_ELEMENTS(notifiers)); |
419 | + return ret; | 75 | + |
76 | + event_notifier_set(¬ifiers[0]); | ||
77 | + assert(aio_poll(ctx, true)); | ||
78 | + | ||
79 | + aio_disable_external(ctx); | ||
80 | + event_notifier_set(¬ifiers[0]); | ||
81 | + assert(aio_poll(ctx, true)); | ||
82 | + aio_enable_external(ctx); | ||
83 | + | ||
84 | + remove_event_notifiers(notifiers, G_N_ELEMENTS(notifiers)); | ||
85 | +} | ||
86 | + | ||
87 | +int main(int argc, char **argv) | ||
88 | +{ | ||
89 | + /* | ||
90 | + * This code relies on the fact that fdmon-io_uring disables itself when | ||
91 | + * the glib main loop is in use. The main loop uses fdmon-poll and upgrades | ||
92 | + * to fdmon-epoll when the number of fds exceeds a threshold. | ||
93 | + */ | ||
94 | + qemu_init_main_loop(&error_fatal); | ||
95 | + ctx = qemu_get_aio_context(); | ||
96 | + | ||
97 | + while (g_main_context_iteration(NULL, false)) { | ||
98 | + /* Do nothing */ | ||
420 | + } | 99 | + } |
421 | + | 100 | + |
422 | + if (luringcb.ret == -EINPROGRESS) { | 101 | + g_test_init(&argc, &argv, NULL); |
423 | + qemu_coroutine_yield(); | 102 | + g_test_add_func("/fdmon-epoll/external-disabled", test_external_disabled); |
424 | + } | 103 | + return g_test_run(); |
425 | + return luringcb.ret; | ||
426 | +} | 104 | +} |
427 | + | 105 | diff --git a/tests/meson.build b/tests/meson.build |
428 | +void luring_detach_aio_context(LuringState *s, AioContext *old_context) | ||
429 | +{ | ||
430 | + aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL, | ||
431 | + s); | ||
432 | + qemu_bh_delete(s->completion_bh); | ||
433 | + s->aio_context = NULL; | ||
434 | +} | ||
435 | + | ||
436 | +void luring_attach_aio_context(LuringState *s, AioContext *new_context) | ||
437 | +{ | ||
438 | + s->aio_context = new_context; | ||
439 | + s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); | ||
440 | + aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, | ||
441 | + qemu_luring_completion_cb, NULL, NULL, s); | ||
442 | +} | ||
443 | + | ||
444 | +LuringState *luring_init(Error **errp) | ||
445 | +{ | ||
446 | + int rc; | ||
447 | + LuringState *s = g_new0(LuringState, 1); | ||
448 | + struct io_uring *ring = &s->ring; | ||
449 | + | ||
450 | + rc = io_uring_queue_init(MAX_ENTRIES, ring, 0); | ||
451 | + if (rc < 0) { | ||
452 | + error_setg_errno(errp, errno, "failed to init linux io_uring ring"); | ||
453 | + g_free(s); | ||
454 | + return NULL; | ||
455 | + } | ||
456 | + | ||
457 | + ioq_init(&s->io_q); | ||
458 | + return s; | ||
459 | + | ||
460 | +} | ||
461 | + | ||
462 | +void luring_cleanup(LuringState *s) | ||
463 | +{ | ||
464 | + io_uring_queue_exit(&s->ring); | ||
465 | + g_free(s); | ||
466 | +} | ||
467 | diff --git a/include/block/aio.h b/include/block/aio.h | ||
468 | index XXXXXXX..XXXXXXX 100644 | 106 | index XXXXXXX..XXXXXXX 100644 |
469 | --- a/include/block/aio.h | 107 | --- a/tests/meson.build |
470 | +++ b/include/block/aio.h | 108 | +++ b/tests/meson.build |
471 | @@ -XXX,XX +XXX,XX @@ typedef void IOHandler(void *opaque); | 109 | @@ -XXX,XX +XXX,XX @@ if have_block |
472 | struct Coroutine; | 110 | if 'CONFIG_NETTLE' in config_host or 'CONFIG_GCRYPT' in config_host |
473 | struct ThreadPool; | 111 | tests += {'test-crypto-pbkdf': [io]} |
474 | struct LinuxAioState; | 112 | endif |
475 | +struct LuringState; | 113 | + if 'CONFIG_EPOLL_CREATE1' in config_host |
476 | 114 | + tests += {'test-fdmon-epoll': [testblock]} | |
477 | struct AioContext { | 115 | + endif |
478 | GSource source; | 116 | benchs += { |
479 | @@ -XXX,XX +XXX,XX @@ struct AioContext { | 117 | 'benchmark-crypto-hash': [crypto], |
480 | struct ThreadPool *thread_pool; | 118 | 'benchmark-crypto-hmac': [crypto], |
481 | |||
482 | #ifdef CONFIG_LINUX_AIO | ||
483 | - /* State for native Linux AIO. Uses aio_context_acquire/release for | ||
484 | + /* | ||
485 | + * State for native Linux AIO. Uses aio_context_acquire/release for | ||
486 | * locking. | ||
487 | */ | ||
488 | struct LinuxAioState *linux_aio; | ||
489 | #endif | ||
490 | +#ifdef CONFIG_LINUX_IO_URING | ||
491 | + /* | ||
492 | + * State for Linux io_uring. Uses aio_context_acquire/release for | ||
493 | + * locking. | ||
494 | + */ | ||
495 | + struct LuringState *linux_io_uring; | ||
496 | +#endif | ||
497 | |||
498 | /* TimerLists for calling timers - one per clock type. Has its own | ||
499 | * locking. | ||
500 | @@ -XXX,XX +XXX,XX @@ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp); | ||
501 | /* Return the LinuxAioState bound to this AioContext */ | ||
502 | struct LinuxAioState *aio_get_linux_aio(AioContext *ctx); | ||
503 | |||
504 | +/* Setup the LuringState bound to this AioContext */ | ||
505 | +struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp); | ||
506 | + | ||
507 | +/* Return the LuringState bound to this AioContext */ | ||
508 | +struct LuringState *aio_get_linux_io_uring(AioContext *ctx); | ||
509 | /** | ||
510 | * aio_timer_new_with_attrs: | ||
511 | * @ctx: the aio context | ||
512 | diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h | ||
513 | index XXXXXXX..XXXXXXX 100644 | ||
514 | --- a/include/block/raw-aio.h | ||
515 | +++ b/include/block/raw-aio.h | ||
516 | @@ -XXX,XX +XXX,XX @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); | ||
517 | void laio_io_plug(BlockDriverState *bs, LinuxAioState *s); | ||
518 | void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s); | ||
519 | #endif | ||
520 | +/* io_uring.c - Linux io_uring implementation */ | ||
521 | +#ifdef CONFIG_LINUX_IO_URING | ||
522 | +typedef struct LuringState LuringState; | ||
523 | +LuringState *luring_init(Error **errp); | ||
524 | +void luring_cleanup(LuringState *s); | ||
525 | +int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | ||
526 | + uint64_t offset, QEMUIOVector *qiov, int type); | ||
527 | +void luring_detach_aio_context(LuringState *s, AioContext *old_context); | ||
528 | +void luring_attach_aio_context(LuringState *s, AioContext *new_context); | ||
529 | +void luring_io_plug(BlockDriverState *bs, LuringState *s); | ||
530 | +void luring_io_unplug(BlockDriverState *bs, LuringState *s); | ||
531 | +#endif | ||
532 | |||
533 | #ifdef _WIN32 | ||
534 | typedef struct QEMUWin32AIOState QEMUWin32AIOState; | ||
535 | -- | 119 | -- |
536 | 2.24.1 | 120 | 2.26.2 |
537 | 121 | ||
538 | diff view generated by jsdifflib |
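The io_uring driver above drives liburing's prepare/submit/complete cycle from coroutines. As background, here is a minimal standalone liburing read program, independent of QEMU, using only public liburing calls (io_uring_queue_init(), io_uring_get_sqe(), io_uring_prep_readv(), io_uring_sqe_set_data(), io_uring_submit(), io_uring_wait_cqe(), io_uring_cqe_get_data(), io_uring_cqe_seen()); the program name, default path and build line are illustrative assumptions.

```c
/*
 * Minimal liburing read example (not QEMU code): shows the get_sqe ->
 * prep -> submit -> wait_cqe cycle that block/io_uring.c wraps.
 * Build (assumed): cc -o uring-read uring-read.c -luring
 */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/etc/hostname"; /* arbitrary file */
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    char buf[4096];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
    int fd, ret;

    fd = open(path, O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    ret = io_uring_queue_init(8, &ring, 0);       /* a tiny ring is enough here */
    if (ret < 0) {
        fprintf(stderr, "io_uring_queue_init: %s\n", strerror(-ret));
        return 1;
    }

    sqe = io_uring_get_sqe(&ring);                /* NULL only when the sq ring is full */
    if (!sqe) {
        fprintf(stderr, "no sqe available\n");
        return 1;
    }
    io_uring_prep_readv(sqe, fd, &iov, 1, 0);     /* readv from offset 0 */
    io_uring_sqe_set_data(sqe, buf);              /* user data travels to the cqe */
    io_uring_submit(&ring);

    ret = io_uring_wait_cqe(&ring, &cqe);         /* block until the completion */
    if (ret < 0) {
        fprintf(stderr, "io_uring_wait_cqe: %s\n", strerror(-ret));
    } else if (cqe->res < 0) {
        fprintf(stderr, "read: %s\n", strerror(-cqe->res));
        io_uring_cqe_seen(&ring, cqe);            /* consume the cqe */
    } else {
        char *data = io_uring_cqe_get_data(cqe);  /* the pointer attached above */
        data[cqe->res] = '\0';
        printf("read %d bytes from %s\n", cqe->res, path);
        io_uring_cqe_seen(&ring, cqe);            /* consume the cqe */
    }

    io_uring_queue_exit(&ring);
    close(fd);
    return 0;
}
```

QEMU's block/io_uring.c drives this same cycle asynchronously: it attaches a LuringAIOCB via io_uring_sqe_set_data(), batches submissions through an overflow queue, and wakes the submitting coroutine when the corresponding cqe is reaped.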
1 | From: Paolo Bonzini <pbonzini@redhat.com> | 1 | clang's C11 atomic_fetch_*() functions only take a C11 atomic type |
---|---|---|---|
2 | pointer argument. QEMU uses direct types (int, etc.) and this causes a | ||
3 | compiler error when QEMU code calls these functions in a source file | ||
4 | that also includes <stdatomic.h> via a system header file: | ||
2 | 5 | ||
3 | It is unused since commit 00e30f0 ("block/backup: use backup-top instead | 6 | $ CC=clang CXX=clang++ ./configure ... && make |
4 | of write notifiers", 2019-10-01), drop it to simplify the code. | 7 | ../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid) |
5 | 8 | ||
6 | While at it, drop redundant assertions on flags. | 9 | Avoid using atomic_*() names in QEMU's atomic.h since that namespace is |
10 | used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h | ||
11 | and <stdatomic.h> can co-exist. I checked /usr/include on my machine and | ||
12 | searched GitHub for existing "qatomic_" users but there seem to be none. | ||
7 | 13 | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | 14 | This patch was generated using: |
9 | Message-id: 1578495356-46219-2-git-send-email-pbonzini@redhat.com | 15 | |
10 | Message-Id: <1578495356-46219-2-git-send-email-pbonzini@redhat.com> | 16 | $ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \ |
17 | sort -u >/tmp/changed_identifiers | ||
18 | $ for identifier in $(</tmp/changed_identifiers); do | ||
19 | sed -i "s%\<$identifier\>%q$identifier%g" \ | ||
20 | $(git grep -I -l "\<$identifier\>") | ||
21 | done | ||
22 | |||
23 | I manually fixed line-wrap issues and misaligned rST tables. | ||
24 | |||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 25 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
26 | Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> | ||
27 | Acked-by: Paolo Bonzini <pbonzini@redhat.com> | ||
28 | Message-Id: <20200923105646.47864-1-stefanha@redhat.com> | ||
12 | --- | 29 | --- |
13 | block/io.c | 18 ++++-------------- | 30 | include/qemu/atomic.h | 248 +++++++++--------- |
14 | include/block/block.h | 12 ------------ | 31 | docs/devel/lockcnt.txt | 8 +- |
15 | 2 files changed, 4 insertions(+), 26 deletions(-) | 32 | docs/devel/rcu.txt | 34 +-- |
33 | accel/tcg/atomic_template.h | 20 +- | ||
34 | include/block/aio-wait.h | 4 +- | ||
35 | include/block/aio.h | 8 +- | ||
36 | include/exec/cpu_ldst.h | 2 +- | ||
37 | include/exec/exec-all.h | 6 +- | ||
38 | include/exec/log.h | 6 +- | ||
39 | include/exec/memory.h | 2 +- | ||
40 | include/exec/ram_addr.h | 26 +- | ||
41 | include/exec/ramlist.h | 2 +- | ||
42 | include/exec/tb-lookup.h | 4 +- | ||
43 | include/hw/core/cpu.h | 2 +- | ||
44 | include/qemu/atomic128.h | 6 +- | ||
45 | include/qemu/bitops.h | 2 +- | ||
46 | include/qemu/coroutine.h | 2 +- | ||
47 | include/qemu/log.h | 6 +- | ||
48 | include/qemu/queue.h | 7 +- | ||
49 | include/qemu/rcu.h | 10 +- | ||
50 | include/qemu/rcu_queue.h | 100 +++---- | ||
51 | include/qemu/seqlock.h | 8 +- | ||
52 | include/qemu/stats64.h | 28 +- | ||
53 | include/qemu/thread.h | 24 +- | ||
54 | .../infiniband/hw/vmw_pvrdma/pvrdma_ring.h | 14 +- | ||
55 | linux-user/qemu.h | 2 +- | ||
56 | tcg/i386/tcg-target.h | 2 +- | ||
57 | tcg/s390/tcg-target.h | 2 +- | ||
58 | tcg/tci/tcg-target.h | 2 +- | ||
59 | accel/kvm/kvm-all.c | 12 +- | ||
60 | accel/tcg/cpu-exec.c | 15 +- | ||
61 | accel/tcg/cputlb.c | 24 +- | ||
62 | accel/tcg/tcg-all.c | 2 +- | ||
63 | accel/tcg/translate-all.c | 55 ++-- | ||
64 | audio/jackaudio.c | 18 +- | ||
65 | block.c | 4 +- | ||
66 | block/block-backend.c | 15 +- | ||
67 | block/io.c | 48 ++-- | ||
68 | block/nfs.c | 2 +- | ||
69 | block/sheepdog.c | 2 +- | ||
70 | block/throttle-groups.c | 12 +- | ||
71 | block/throttle.c | 4 +- | ||
72 | blockdev.c | 2 +- | ||
73 | blockjob.c | 2 +- | ||
74 | contrib/libvhost-user/libvhost-user.c | 2 +- | ||
75 | cpus-common.c | 26 +- | ||
76 | dump/dump.c | 8 +- | ||
77 | exec.c | 49 ++-- | ||
78 | hw/core/cpu.c | 6 +- | ||
79 | hw/display/qxl.c | 4 +- | ||
80 | hw/hyperv/hyperv.c | 10 +- | ||
81 | hw/hyperv/vmbus.c | 2 +- | ||
82 | hw/i386/xen/xen-hvm.c | 2 +- | ||
83 | hw/intc/rx_icu.c | 12 +- | ||
84 | hw/intc/sifive_plic.c | 4 +- | ||
85 | hw/misc/edu.c | 16 +- | ||
86 | hw/net/virtio-net.c | 10 +- | ||
87 | hw/rdma/rdma_backend.c | 18 +- | ||
88 | hw/rdma/rdma_rm.c | 2 +- | ||
89 | hw/rdma/vmw/pvrdma_dev_ring.c | 4 +- | ||
90 | hw/s390x/s390-pci-bus.c | 2 +- | ||
91 | hw/s390x/virtio-ccw.c | 2 +- | ||
92 | hw/virtio/vhost.c | 2 +- | ||
93 | hw/virtio/virtio-mmio.c | 6 +- | ||
94 | hw/virtio/virtio-pci.c | 6 +- | ||
95 | hw/virtio/virtio.c | 16 +- | ||
96 | hw/xtensa/pic_cpu.c | 4 +- | ||
97 | iothread.c | 6 +- | ||
98 | linux-user/hppa/cpu_loop.c | 11 +- | ||
99 | linux-user/signal.c | 8 +- | ||
100 | migration/colo-failover.c | 4 +- | ||
101 | migration/migration.c | 8 +- | ||
102 | migration/multifd.c | 18 +- | ||
103 | migration/postcopy-ram.c | 34 +-- | ||
104 | migration/rdma.c | 34 +-- | ||
105 | monitor/hmp.c | 6 +- | ||
106 | monitor/misc.c | 2 +- | ||
107 | monitor/monitor.c | 6 +- | ||
108 | qemu-nbd.c | 2 +- | ||
109 | qga/commands.c | 12 +- | ||
110 | qom/object.c | 20 +- | ||
111 | scsi/qemu-pr-helper.c | 4 +- | ||
112 | softmmu/cpu-throttle.c | 10 +- | ||
113 | softmmu/cpus.c | 42 +-- | ||
114 | softmmu/memory.c | 6 +- | ||
115 | softmmu/vl.c | 2 +- | ||
116 | target/arm/mte_helper.c | 6 +- | ||
117 | target/hppa/op_helper.c | 2 +- | ||
118 | target/i386/mem_helper.c | 2 +- | ||
119 | target/i386/whpx-all.c | 6 +- | ||
120 | target/riscv/cpu_helper.c | 2 +- | ||
121 | target/s390x/mem_helper.c | 4 +- | ||
122 | target/xtensa/exc_helper.c | 4 +- | ||
123 | target/xtensa/op_helper.c | 2 +- | ||
124 | tcg/tcg.c | 58 ++-- | ||
125 | tcg/tci.c | 2 +- | ||
126 | tests/atomic64-bench.c | 14 +- | ||
127 | tests/atomic_add-bench.c | 14 +- | ||
128 | tests/iothread.c | 2 +- | ||
129 | tests/qht-bench.c | 12 +- | ||
130 | tests/rcutorture.c | 24 +- | ||
131 | tests/test-aio-multithread.c | 52 ++-- | ||
132 | tests/test-logging.c | 4 +- | ||
133 | tests/test-rcu-list.c | 38 +-- | ||
134 | tests/test-thread-pool.c | 10 +- | ||
135 | util/aio-posix.c | 14 +- | ||
136 | util/aio-wait.c | 2 +- | ||
137 | util/aio-win32.c | 5 +- | ||
138 | util/async.c | 28 +- | ||
139 | util/atomic64.c | 10 +- | ||
140 | util/bitmap.c | 14 +- | ||
141 | util/cacheinfo.c | 2 +- | ||
142 | util/fdmon-epoll.c | 4 +- | ||
143 | util/fdmon-io_uring.c | 12 +- | ||
144 | util/lockcnt.c | 52 ++-- | ||
145 | util/log.c | 10 +- | ||
146 | util/qemu-coroutine-lock.c | 18 +- | ||
147 | util/qemu-coroutine-sleep.c | 4 +- | ||
148 | util/qemu-coroutine.c | 6 +- | ||
149 | util/qemu-sockets.c | 4 +- | ||
150 | util/qemu-thread-posix.c | 12 +- | ||
151 | util/qemu-thread-win32.c | 12 +- | ||
152 | util/qemu-timer.c | 12 +- | ||
153 | util/qht.c | 57 ++-- | ||
154 | util/qsp.c | 50 ++-- | ||
155 | util/rcu.c | 36 +-- | ||
156 | util/stats64.c | 34 +-- | ||
157 | docs/devel/atomics.rst | 134 +++++----- | ||
158 | scripts/kernel-doc | 2 +- | ||
159 | tcg/aarch64/tcg-target.c.inc | 2 +- | ||
160 | tcg/mips/tcg-target.c.inc | 2 +- | ||
161 | tcg/ppc/tcg-target.c.inc | 6 +- | ||
162 | tcg/sparc/tcg-target.c.inc | 5 +- | ||
163 | 133 files changed, 1041 insertions(+), 1018 deletions(-) | ||
16 | 164 | ||
165 | diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h | ||
166 | index XXXXXXX..XXXXXXX 100644 | ||
167 | --- a/include/qemu/atomic.h | ||
168 | +++ b/include/qemu/atomic.h | ||
169 | @@ -XXX,XX +XXX,XX @@ | ||
170 | * no effect on the generated code but not using the atomic primitives | ||
171 | * will get flagged by sanitizers as a violation. | ||
172 | */ | ||
173 | -#define atomic_read__nocheck(ptr) \ | ||
174 | +#define qatomic_read__nocheck(ptr) \ | ||
175 | __atomic_load_n(ptr, __ATOMIC_RELAXED) | ||
176 | |||
177 | -#define atomic_read(ptr) \ | ||
178 | - ({ \ | ||
179 | +#define qatomic_read(ptr) \ | ||
180 | + ({ \ | ||
181 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
182 | - atomic_read__nocheck(ptr); \ | ||
183 | + qatomic_read__nocheck(ptr); \ | ||
184 | }) | ||
185 | |||
186 | -#define atomic_set__nocheck(ptr, i) \ | ||
187 | +#define qatomic_set__nocheck(ptr, i) \ | ||
188 | __atomic_store_n(ptr, i, __ATOMIC_RELAXED) | ||
189 | |||
190 | -#define atomic_set(ptr, i) do { \ | ||
191 | +#define qatomic_set(ptr, i) do { \ | ||
192 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
193 | - atomic_set__nocheck(ptr, i); \ | ||
194 | + qatomic_set__nocheck(ptr, i); \ | ||
195 | } while(0) | ||
196 | |||
197 | /* See above: most compilers currently treat consume and acquire the | ||
198 | - * same, but this slows down atomic_rcu_read unnecessarily. | ||
199 | + * same, but this slows down qatomic_rcu_read unnecessarily. | ||
200 | */ | ||
201 | #ifdef __SANITIZE_THREAD__ | ||
202 | -#define atomic_rcu_read__nocheck(ptr, valptr) \ | ||
203 | +#define qatomic_rcu_read__nocheck(ptr, valptr) \ | ||
204 | __atomic_load(ptr, valptr, __ATOMIC_CONSUME); | ||
205 | #else | ||
206 | -#define atomic_rcu_read__nocheck(ptr, valptr) \ | ||
207 | - __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \ | ||
208 | +#define qatomic_rcu_read__nocheck(ptr, valptr) \ | ||
209 | + __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \ | ||
210 | smp_read_barrier_depends(); | ||
211 | #endif | ||
212 | |||
213 | -#define atomic_rcu_read(ptr) \ | ||
214 | - ({ \ | ||
215 | +#define qatomic_rcu_read(ptr) \ | ||
216 | + ({ \ | ||
217 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
218 | - typeof_strip_qual(*ptr) _val; \ | ||
219 | - atomic_rcu_read__nocheck(ptr, &_val); \ | ||
220 | - _val; \ | ||
221 | + typeof_strip_qual(*ptr) _val; \ | ||
222 | + qatomic_rcu_read__nocheck(ptr, &_val); \ | ||
223 | + _val; \ | ||
224 | }) | ||
225 | |||
226 | -#define atomic_rcu_set(ptr, i) do { \ | ||
227 | +#define qatomic_rcu_set(ptr, i) do { \ | ||
228 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
229 | - __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ | ||
230 | + __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ | ||
231 | } while(0) | ||
232 | |||
233 | -#define atomic_load_acquire(ptr) \ | ||
234 | +#define qatomic_load_acquire(ptr) \ | ||
235 | ({ \ | ||
236 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
237 | typeof_strip_qual(*ptr) _val; \ | ||
238 | @@ -XXX,XX +XXX,XX @@ | ||
239 | _val; \ | ||
240 | }) | ||
241 | |||
242 | -#define atomic_store_release(ptr, i) do { \ | ||
243 | +#define qatomic_store_release(ptr, i) do { \ | ||
244 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
245 | __atomic_store_n(ptr, i, __ATOMIC_RELEASE); \ | ||
246 | } while(0) | ||
247 | @@ -XXX,XX +XXX,XX @@ | ||
248 | |||
249 | /* All the remaining operations are fully sequentially consistent */ | ||
250 | |||
251 | -#define atomic_xchg__nocheck(ptr, i) ({ \ | ||
252 | +#define qatomic_xchg__nocheck(ptr, i) ({ \ | ||
253 | __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST); \ | ||
254 | }) | ||
255 | |||
256 | -#define atomic_xchg(ptr, i) ({ \ | ||
257 | +#define qatomic_xchg(ptr, i) ({ \ | ||
258 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
259 | - atomic_xchg__nocheck(ptr, i); \ | ||
260 | + qatomic_xchg__nocheck(ptr, i); \ | ||
261 | }) | ||
262 | |||
263 | /* Returns the eventual value, failed or not */ | ||
264 | -#define atomic_cmpxchg__nocheck(ptr, old, new) ({ \ | ||
265 | +#define qatomic_cmpxchg__nocheck(ptr, old, new) ({ \ | ||
266 | typeof_strip_qual(*ptr) _old = (old); \ | ||
267 | (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \ | ||
268 | __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \ | ||
269 | _old; \ | ||
270 | }) | ||
271 | |||
272 | -#define atomic_cmpxchg(ptr, old, new) ({ \ | ||
273 | +#define qatomic_cmpxchg(ptr, old, new) ({ \ | ||
274 | QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \ | ||
275 | - atomic_cmpxchg__nocheck(ptr, old, new); \ | ||
276 | + qatomic_cmpxchg__nocheck(ptr, old, new); \ | ||
277 | }) | ||
278 | |||
279 | /* Provide shorter names for GCC atomic builtins, return old value */ | ||
280 | -#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST) | ||
281 | -#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST) | ||
282 | +#define qatomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST) | ||
283 | +#define qatomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST) | ||
284 | |||
285 | -#ifndef atomic_fetch_add | ||
286 | -#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST) | ||
287 | -#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST) | ||
288 | -#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST) | ||
289 | -#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST) | ||
290 | -#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST) | ||
291 | -#endif | ||
292 | +#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST) | ||
293 | +#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST) | ||
294 | +#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST) | ||
295 | +#define qatomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST) | ||
296 | +#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST) | ||
297 | |||
298 | -#define atomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) | ||
299 | -#define atomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) | ||
300 | -#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
301 | -#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
302 | -#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
303 | -#define atomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
304 | -#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
305 | +#define qatomic_inc_fetch(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST) | ||
306 | +#define qatomic_dec_fetch(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST) | ||
307 | +#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
308 | +#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
309 | +#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
310 | +#define qatomic_or_fetch(ptr, n) __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
311 | +#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST) | ||
312 | |||
313 | /* And even shorter names that return void. */ | ||
314 | -#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)) | ||
315 | -#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)) | ||
316 | -#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)) | ||
317 | -#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)) | ||
318 | -#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)) | ||
319 | -#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)) | ||
320 | -#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)) | ||
321 | +#define qatomic_inc(ptr) \ | ||
322 | + ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)) | ||
323 | +#define qatomic_dec(ptr) \ | ||
324 | + ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)) | ||
325 | +#define qatomic_add(ptr, n) \ | ||
326 | + ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)) | ||
327 | +#define qatomic_sub(ptr, n) \ | ||
328 | + ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)) | ||
329 | +#define qatomic_and(ptr, n) \ | ||
330 | + ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)) | ||
331 | +#define qatomic_or(ptr, n) \ | ||
332 | + ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)) | ||
333 | +#define qatomic_xor(ptr, n) \ | ||
334 | + ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)) | ||
335 | |||
336 | #else /* __ATOMIC_RELAXED */ | ||
337 | |||
338 | @@ -XXX,XX +XXX,XX @@ | ||
339 | * but it is a full barrier at the hardware level. Add a compiler barrier | ||
340 | * to make it a full barrier also at the compiler level. | ||
341 | */ | ||
342 | -#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) | ||
343 | +#define qatomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i)) | ||
344 | |||
345 | #elif defined(_ARCH_PPC) | ||
346 | |||
347 | @@ -XXX,XX +XXX,XX @@ | ||
348 | /* These will only be atomic if the processor does the fetch or store | ||
349 | * in a single issue memory operation | ||
350 | */ | ||
351 | -#define atomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p)) | ||
352 | -#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i)) | ||
353 | +#define qatomic_read__nocheck(p) (*(__typeof__(*(p)) volatile*) (p)) | ||
354 | +#define qatomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i)) | ||
355 | |||
356 | -#define atomic_read(ptr) atomic_read__nocheck(ptr) | ||
357 | -#define atomic_set(ptr, i) atomic_set__nocheck(ptr,i) | ||
358 | +#define qatomic_read(ptr) qatomic_read__nocheck(ptr) | ||
359 | +#define qatomic_set(ptr, i) qatomic_set__nocheck(ptr,i) | ||
360 | |||
361 | /** | ||
362 | - * atomic_rcu_read - reads a RCU-protected pointer to a local variable | ||
363 | + * qatomic_rcu_read - reads a RCU-protected pointer to a local variable | ||
364 | * into a RCU read-side critical section. The pointer can later be safely | ||
365 | * dereferenced within the critical section. | ||
366 | * | ||
367 | @@ -XXX,XX +XXX,XX @@ | ||
368 | * Inserts memory barriers on architectures that require them (currently only | ||
369 | * Alpha) and documents which pointers are protected by RCU. | ||
370 | * | ||
371 | - * atomic_rcu_read also includes a compiler barrier to ensure that | ||
372 | + * qatomic_rcu_read also includes a compiler barrier to ensure that | ||
373 | * value-speculative optimizations (e.g. VSS: Value Speculation | ||
374 | * Scheduling) does not perform the data read before the pointer read | ||
375 | * by speculating the value of the pointer. | ||
376 | * | ||
377 | - * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg(). | ||
378 | + * Should match qatomic_rcu_set(), qatomic_xchg(), qatomic_cmpxchg(). | ||
379 | */ | ||
380 | -#define atomic_rcu_read(ptr) ({ \ | ||
381 | - typeof(*ptr) _val = atomic_read(ptr); \ | ||
382 | +#define qatomic_rcu_read(ptr) ({ \ | ||
383 | + typeof(*ptr) _val = qatomic_read(ptr); \ | ||
384 | smp_read_barrier_depends(); \ | ||
385 | _val; \ | ||
386 | }) | ||
387 | |||
388 | /** | ||
389 | - * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure | ||
390 | + * qatomic_rcu_set - assigns (publicizes) a pointer to a new data structure | ||
391 | * meant to be read by RCU read-side critical sections. | ||
392 | * | ||
393 | * Documents which pointers will be dereferenced by RCU read-side critical | ||
394 | @@ -XXX,XX +XXX,XX @@ | ||
395 | * them. It also makes sure the compiler does not reorder code initializing the | ||
396 | * data structure before its publication. | ||
397 | * | ||
398 | - * Should match atomic_rcu_read(). | ||
399 | + * Should match qatomic_rcu_read(). | ||
400 | */ | ||
401 | -#define atomic_rcu_set(ptr, i) do { \ | ||
402 | +#define qatomic_rcu_set(ptr, i) do { \ | ||
403 | smp_wmb(); \ | ||
404 | - atomic_set(ptr, i); \ | ||
405 | + qatomic_set(ptr, i); \ | ||
406 | } while (0) | ||
407 | |||
408 | -#define atomic_load_acquire(ptr) ({ \ | ||
409 | - typeof(*ptr) _val = atomic_read(ptr); \ | ||
410 | +#define qatomic_load_acquire(ptr) ({ \ | ||
411 | + typeof(*ptr) _val = qatomic_read(ptr); \ | ||
412 | smp_mb_acquire(); \ | ||
413 | _val; \ | ||
414 | }) | ||
415 | |||
416 | -#define atomic_store_release(ptr, i) do { \ | ||
417 | +#define qatomic_store_release(ptr, i) do { \ | ||
418 | smp_mb_release(); \ | ||
419 | - atomic_set(ptr, i); \ | ||
420 | + qatomic_set(ptr, i); \ | ||
421 | } while (0) | ||
422 | |||
423 | -#ifndef atomic_xchg | ||
424 | +#ifndef qatomic_xchg | ||
425 | #if defined(__clang__) | ||
426 | -#define atomic_xchg(ptr, i) __sync_swap(ptr, i) | ||
427 | +#define qatomic_xchg(ptr, i) __sync_swap(ptr, i) | ||
428 | #else | ||
429 | /* __sync_lock_test_and_set() is documented to be an acquire barrier only. */ | ||
430 | -#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) | ||
431 | +#define qatomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i)) | ||
432 | #endif | ||
433 | #endif | ||
434 | -#define atomic_xchg__nocheck atomic_xchg | ||
435 | +#define qatomic_xchg__nocheck qatomic_xchg | ||
436 | |||
437 | /* Provide shorter names for GCC atomic builtins. */ | ||
438 | -#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) | ||
439 | -#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) | ||
440 | +#define qatomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1) | ||
441 | +#define qatomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1) | ||
442 | |||
443 | -#ifndef atomic_fetch_add | ||
444 | -#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n) | ||
445 | -#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n) | ||
446 | -#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n) | ||
447 | -#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n) | ||
448 | -#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n) | ||
449 | -#endif | ||
450 | +#define qatomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n) | ||
451 | +#define qatomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n) | ||
452 | +#define qatomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n) | ||
453 | +#define qatomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n) | ||
454 | +#define qatomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n) | ||
455 | |||
456 | -#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1) | ||
457 | -#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1) | ||
458 | -#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n) | ||
459 | -#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n) | ||
460 | -#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n) | ||
461 | -#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n) | ||
462 | -#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n) | ||
463 | +#define qatomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1) | ||
464 | +#define qatomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1) | ||
465 | +#define qatomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n) | ||
466 | +#define qatomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n) | ||
467 | +#define qatomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n) | ||
468 | +#define qatomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n) | ||
469 | +#define qatomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n) | ||
470 | |||
471 | -#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new) | ||
472 | -#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new) | ||
473 | +#define qatomic_cmpxchg(ptr, old, new) \ | ||
474 | + __sync_val_compare_and_swap(ptr, old, new) | ||
475 | +#define qatomic_cmpxchg__nocheck(ptr, old, new) qatomic_cmpxchg(ptr, old, new) | ||
476 | |||
477 | /* And even shorter names that return void. */ | ||
478 | -#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) | ||
479 | -#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) | ||
480 | -#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) | ||
481 | -#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) | ||
482 | -#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) | ||
483 | -#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) | ||
484 | -#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n)) | ||
485 | +#define qatomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1)) | ||
486 | +#define qatomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1)) | ||
487 | +#define qatomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n)) | ||
488 | +#define qatomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n)) | ||
489 | +#define qatomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n)) | ||
490 | +#define qatomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n)) | ||
491 | +#define qatomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n)) | ||
492 | |||
493 | #endif /* __ATOMIC_RELAXED */ | ||
494 | |||
495 | @@ -XXX,XX +XXX,XX @@ | ||
496 | /* This is more efficient than a store plus a fence. */ | ||
497 | #if !defined(__SANITIZE_THREAD__) | ||
498 | #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) | ||
499 | -#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i)) | ||
500 | +#define qatomic_mb_set(ptr, i) ((void)qatomic_xchg(ptr, i)) | ||
501 | #endif | ||
502 | #endif | ||
503 | |||
504 | -/* atomic_mb_read/set semantics map Java volatile variables. They are | ||
505 | +/* qatomic_mb_read/set semantics map Java volatile variables. They are | ||
506 | * less expensive on some platforms (notably POWER) than fully | ||
507 | * sequentially consistent operations. | ||
508 | * | ||
509 | @@ -XXX,XX +XXX,XX @@ | ||
510 | * use. See docs/devel/atomics.txt for more discussion. | ||
511 | */ | ||
512 | |||
513 | -#ifndef atomic_mb_read | ||
514 | -#define atomic_mb_read(ptr) \ | ||
515 | - atomic_load_acquire(ptr) | ||
516 | +#ifndef qatomic_mb_read | ||
517 | +#define qatomic_mb_read(ptr) \ | ||
518 | + qatomic_load_acquire(ptr) | ||
519 | #endif | ||
520 | |||
521 | -#ifndef atomic_mb_set | ||
522 | -#define atomic_mb_set(ptr, i) do { \ | ||
523 | - atomic_store_release(ptr, i); \ | ||
524 | +#ifndef qatomic_mb_set | ||
525 | +#define qatomic_mb_set(ptr, i) do { \ | ||
526 | + qatomic_store_release(ptr, i); \ | ||
527 | smp_mb(); \ | ||
528 | } while(0) | ||
529 | #endif | ||
530 | |||
531 | -#define atomic_fetch_inc_nonzero(ptr) ({ \ | ||
532 | - typeof_strip_qual(*ptr) _oldn = atomic_read(ptr); \ | ||
533 | - while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \ | ||
534 | - _oldn = atomic_read(ptr); \ | ||
535 | +#define qatomic_fetch_inc_nonzero(ptr) ({ \ | ||
536 | + typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr); \ | ||
537 | + while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) { \ | ||
538 | + _oldn = qatomic_read(ptr); \ | ||
539 | } \ | ||
540 | _oldn; \ | ||
541 | }) | ||
542 | |||
543 | /* Abstractions to access atomically (i.e. "once") i64/u64 variables */ | ||
544 | #ifdef CONFIG_ATOMIC64 | ||
545 | -static inline int64_t atomic_read_i64(const int64_t *ptr) | ||
546 | +static inline int64_t qatomic_read_i64(const int64_t *ptr) | ||
547 | { | ||
548 | /* use __nocheck because sizeof(void *) might be < sizeof(u64) */ | ||
549 | - return atomic_read__nocheck(ptr); | ||
550 | + return qatomic_read__nocheck(ptr); | ||
551 | } | ||
552 | |||
553 | -static inline uint64_t atomic_read_u64(const uint64_t *ptr) | ||
554 | +static inline uint64_t qatomic_read_u64(const uint64_t *ptr) | ||
555 | { | ||
556 | - return atomic_read__nocheck(ptr); | ||
557 | + return qatomic_read__nocheck(ptr); | ||
558 | } | ||
559 | |||
560 | -static inline void atomic_set_i64(int64_t *ptr, int64_t val) | ||
561 | +static inline void qatomic_set_i64(int64_t *ptr, int64_t val) | ||
562 | { | ||
563 | - atomic_set__nocheck(ptr, val); | ||
564 | + qatomic_set__nocheck(ptr, val); | ||
565 | } | ||
566 | |||
567 | -static inline void atomic_set_u64(uint64_t *ptr, uint64_t val) | ||
568 | +static inline void qatomic_set_u64(uint64_t *ptr, uint64_t val) | ||
569 | { | ||
570 | - atomic_set__nocheck(ptr, val); | ||
571 | + qatomic_set__nocheck(ptr, val); | ||
572 | } | ||
573 | |||
574 | -static inline void atomic64_init(void) | ||
575 | +static inline void qatomic64_init(void) | ||
576 | { | ||
577 | } | ||
578 | #else /* !CONFIG_ATOMIC64 */ | ||
579 | -int64_t atomic_read_i64(const int64_t *ptr); | ||
580 | -uint64_t atomic_read_u64(const uint64_t *ptr); | ||
581 | -void atomic_set_i64(int64_t *ptr, int64_t val); | ||
582 | -void atomic_set_u64(uint64_t *ptr, uint64_t val); | ||
583 | -void atomic64_init(void); | ||
584 | +int64_t qatomic_read_i64(const int64_t *ptr); | ||
585 | +uint64_t qatomic_read_u64(const uint64_t *ptr); | ||
586 | +void qatomic_set_i64(int64_t *ptr, int64_t val); | ||
587 | +void qatomic_set_u64(uint64_t *ptr, uint64_t val); | ||
588 | +void qatomic64_init(void); | ||
589 | #endif /* !CONFIG_ATOMIC64 */ | ||
590 | |||
591 | #endif /* QEMU_ATOMIC_H */ | ||
592 | diff --git a/docs/devel/lockcnt.txt b/docs/devel/lockcnt.txt | ||
593 | index XXXXXXX..XXXXXXX 100644 | ||
594 | --- a/docs/devel/lockcnt.txt | ||
595 | +++ b/docs/devel/lockcnt.txt | ||
596 | @@ -XXX,XX +XXX,XX @@ not just frees, though there could be cases where this is not necessary. | ||
597 | |||
598 | Reads, instead, can be done without taking the mutex, as long as the | ||
599 | readers and writers use the same macros that are used for RCU, for | ||
600 | -example atomic_rcu_read, atomic_rcu_set, QLIST_FOREACH_RCU, etc. This is | ||
601 | +example qatomic_rcu_read, qatomic_rcu_set, QLIST_FOREACH_RCU, etc. This is | ||
602 | because the reads are done outside a lock and a set or QLIST_INSERT_HEAD | ||
603 | can happen concurrently with the read. The RCU API ensures that the | ||
604 | processor and the compiler see all required memory barriers. | ||
605 | @@ -XXX,XX +XXX,XX @@ qemu_lockcnt_lock and qemu_lockcnt_unlock: | ||
606 | if (!xyz) { | ||
607 | new_xyz = g_new(XYZ, 1); | ||
608 | ... | ||
609 | - atomic_rcu_set(&xyz, new_xyz); | ||
610 | + qatomic_rcu_set(&xyz, new_xyz); | ||
611 | } | ||
612 | qemu_lockcnt_unlock(&xyz_lockcnt); | ||
613 | |||
614 | @@ -XXX,XX +XXX,XX @@ qemu_lockcnt_dec: | ||
615 | |||
616 | qemu_lockcnt_inc(&xyz_lockcnt); | ||
617 | if (xyz) { | ||
618 | - XYZ *p = atomic_rcu_read(&xyz); | ||
619 | + XYZ *p = qatomic_rcu_read(&xyz); | ||
620 | ... | ||
621 | /* Accesses can now be done through "p". */ | ||
622 | } | ||
623 | @@ -XXX,XX +XXX,XX @@ the decrement, the locking and the check on count as follows: | ||
624 | |||
625 | qemu_lockcnt_inc(&xyz_lockcnt); | ||
626 | if (xyz) { | ||
627 | - XYZ *p = atomic_rcu_read(&xyz); | ||
628 | + XYZ *p = qatomic_rcu_read(&xyz); | ||
629 | ... | ||
630 | /* Accesses can now be done through "p". */ | ||
631 | } | ||
632 | diff --git a/docs/devel/rcu.txt b/docs/devel/rcu.txt | ||
633 | index XXXXXXX..XXXXXXX 100644 | ||
634 | --- a/docs/devel/rcu.txt | ||
635 | +++ b/docs/devel/rcu.txt | ||
636 | @@ -XXX,XX +XXX,XX @@ The core RCU API is small: | ||
637 | |||
638 | g_free_rcu(&foo, rcu); | ||
639 | |||
640 | - typeof(*p) atomic_rcu_read(p); | ||
641 | + typeof(*p) qatomic_rcu_read(p); | ||
642 | |||
643 | - atomic_rcu_read() is similar to atomic_load_acquire(), but it makes | ||
644 | + qatomic_rcu_read() is similar to qatomic_load_acquire(), but it makes | ||
645 | some assumptions on the code that calls it. This allows a more | ||
646 | optimized implementation. | ||
647 | |||
648 | - atomic_rcu_read assumes that whenever a single RCU critical | ||
649 | + qatomic_rcu_read assumes that whenever a single RCU critical | ||
650 | section reads multiple shared data, these reads are either | ||
651 | data-dependent or need no ordering. This is almost always the | ||
652 | case when using RCU, because read-side critical sections typically | ||
653 | @@ -XXX,XX +XXX,XX @@ The core RCU API is small: | ||
654 | every update) until reaching a data structure of interest, | ||
655 | and then read from there. | ||
656 | |||
657 | - RCU read-side critical sections must use atomic_rcu_read() to | ||
658 | + RCU read-side critical sections must use qatomic_rcu_read() to | ||
659 | read data, unless concurrent writes are prevented by another | ||
660 | synchronization mechanism. | ||
661 | |||
662 | @@ -XXX,XX +XXX,XX @@ The core RCU API is small: | ||
663 | data structure in a single direction, opposite to the direction | ||
664 | in which the updater initializes it. | ||
665 | |||
666 | - void atomic_rcu_set(p, typeof(*p) v); | ||
667 | + void qatomic_rcu_set(p, typeof(*p) v); | ||
668 | |||
669 | - atomic_rcu_set() is similar to atomic_store_release(), though it also | ||
670 | + qatomic_rcu_set() is similar to qatomic_store_release(), though it also | ||
671 | makes assumptions on the code that calls it in order to allow a more | ||
672 | optimized implementation. | ||
673 | |||
674 | - In particular, atomic_rcu_set() suffices for synchronization | ||
675 | + In particular, qatomic_rcu_set() suffices for synchronization | ||
676 | with readers, if the updater never mutates a field within a | ||
677 | data item that is already accessible to readers. This is the | ||
678 | case when initializing a new copy of the RCU-protected data | ||
679 | structure; just ensure that initialization of *p is carried out | ||
680 | - before atomic_rcu_set() makes the data item visible to readers. | ||
681 | + before qatomic_rcu_set() makes the data item visible to readers. | ||
682 | If this rule is observed, writes will happen in the opposite | ||
683 | order as reads in the RCU read-side critical sections (or if | ||
684 | there is just one update), and there will be no need for other | ||
685 | @@ -XXX,XX +XXX,XX @@ DIFFERENCES WITH LINUX | ||
686 | programming; not allowing this would prevent upgrading an RCU read-side | ||
687 | critical section to become an updater. | ||
688 | |||
689 | -- atomic_rcu_read and atomic_rcu_set replace rcu_dereference and | ||
690 | +- qatomic_rcu_read and qatomic_rcu_set replace rcu_dereference and | ||
691 | rcu_assign_pointer. They take a _pointer_ to the variable being accessed. | ||
692 | |||
693 | - call_rcu is a macro that has an extra argument (the name of the first | ||
694 | @@ -XXX,XX +XXX,XX @@ may be used as a restricted reference-counting mechanism. For example, | ||
695 | consider the following code fragment: | ||
696 | |||
697 | rcu_read_lock(); | ||
698 | - p = atomic_rcu_read(&foo); | ||
699 | + p = qatomic_rcu_read(&foo); | ||
700 | /* do something with p. */ | ||
701 | rcu_read_unlock(); | ||
702 | |||
703 | @@ -XXX,XX +XXX,XX @@ The write side looks simply like this (with appropriate locking): | ||
704 | |||
705 | qemu_mutex_lock(&foo_mutex); | ||
706 | old = foo; | ||
707 | - atomic_rcu_set(&foo, new); | ||
708 | + qatomic_rcu_set(&foo, new); | ||
709 | qemu_mutex_unlock(&foo_mutex); | ||
710 | synchronize_rcu(); | ||
711 | free(old); | ||
712 | @@ -XXX,XX +XXX,XX @@ If the processing cannot be done purely within the critical section, it | ||
713 | is possible to combine this idiom with a "real" reference count: | ||
714 | |||
715 | rcu_read_lock(); | ||
716 | - p = atomic_rcu_read(&foo); | ||
717 | + p = qatomic_rcu_read(&foo); | ||
718 | foo_ref(p); | ||
719 | rcu_read_unlock(); | ||
720 | /* do something with p. */ | ||
721 | @@ -XXX,XX +XXX,XX @@ The write side can be like this: | ||
722 | |||
723 | qemu_mutex_lock(&foo_mutex); | ||
724 | old = foo; | ||
725 | - atomic_rcu_set(&foo, new); | ||
726 | + qatomic_rcu_set(&foo, new); | ||
727 | qemu_mutex_unlock(&foo_mutex); | ||
728 | synchronize_rcu(); | ||
729 | foo_unref(old); | ||
730 | @@ -XXX,XX +XXX,XX @@ or with call_rcu: | ||
731 | |||
732 | qemu_mutex_lock(&foo_mutex); | ||
733 | old = foo; | ||
734 | - atomic_rcu_set(&foo, new); | ||
735 | + qatomic_rcu_set(&foo, new); | ||
736 | qemu_mutex_unlock(&foo_mutex); | ||
737 | call_rcu(foo_unref, old, rcu); | ||
738 | |||
739 | @@ -XXX,XX +XXX,XX @@ last reference may be dropped on the read side. Hence you can | ||
740 | use call_rcu() instead: | ||
741 | |||
742 | foo_unref(struct foo *p) { | ||
743 | - if (atomic_fetch_dec(&p->refcount) == 1) { | ||
744 | + if (qatomic_fetch_dec(&p->refcount) == 1) { | ||
745 | call_rcu(foo_destroy, p, rcu); | ||
746 | } | ||
747 | } | ||
748 | @@ -XXX,XX +XXX,XX @@ Instead, we store the size of the array with the array itself: | ||
749 | |||
750 | read side: | ||
751 | rcu_read_lock(); | ||
752 | - struct arr *array = atomic_rcu_read(&global_array); | ||
753 | + struct arr *array = qatomic_rcu_read(&global_array); | ||
754 | x = i < array->size ? array->data[i] : -1; | ||
755 | rcu_read_unlock(); | ||
756 | return x; | ||
757 | @@ -XXX,XX +XXX,XX @@ Instead, we store the size of the array with the array itself: | ||
758 | |||
759 | /* Removal phase. */ | ||
760 | old_array = global_array; | ||
761 | - atomic_rcu_set(&new_array->data, new_array); | ||
762 | + qatomic_rcu_set(&new_array->data, new_array); | ||
763 | synchronize_rcu(); | ||
764 | |||
765 | /* Reclamation phase. */ | ||
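Taken together, the read and update sides documented above look like this after the rename (a minimal sketch, not part of the patch; `Config`, `cfg` and `cfg_mutex` are illustrative names, with the mutex initialized elsewhere):

    typedef struct Config {
        int value;
    } Config;

    static Config *cfg;          /* RCU-protected pointer */
    static QemuMutex cfg_mutex;  /* serializes updaters */

    static int config_get_value(void)
    {
        Config *p;
        int val;

        rcu_read_lock();
        p = qatomic_rcu_read(&cfg);
        val = p ? p->value : 0;
        rcu_read_unlock();
        return val;
    }

    static void config_set_value(int value)
    {
        Config *new_cfg = g_new0(Config, 1);
        Config *old;

        new_cfg->value = value;           /* initialize before publishing */
        qemu_mutex_lock(&cfg_mutex);
        old = cfg;
        qatomic_rcu_set(&cfg, new_cfg);   /* make it visible to readers */
        qemu_mutex_unlock(&cfg_mutex);
        synchronize_rcu();                /* wait for readers of "old" */
        g_free(old);
    }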
766 | diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h | ||
767 | index XXXXXXX..XXXXXXX 100644 | ||
768 | --- a/accel/tcg/atomic_template.h | ||
769 | +++ b/accel/tcg/atomic_template.h | ||
770 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, | ||
771 | #if DATA_SIZE == 16 | ||
772 | ret = atomic16_cmpxchg(haddr, cmpv, newv); | ||
773 | #else | ||
774 | - ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv); | ||
775 | + ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); | ||
776 | #endif | ||
777 | ATOMIC_MMU_CLEANUP; | ||
778 | atomic_trace_rmw_post(env, addr, info); | ||
779 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, | ||
780 | ATOMIC_MMU_IDX); | ||
781 | |||
782 | atomic_trace_rmw_pre(env, addr, info); | ||
783 | - ret = atomic_xchg__nocheck(haddr, val); | ||
784 | + ret = qatomic_xchg__nocheck(haddr, val); | ||
785 | ATOMIC_MMU_CLEANUP; | ||
786 | atomic_trace_rmw_post(env, addr, info); | ||
787 | return ret; | ||
788 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ | ||
789 | uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \ | ||
790 | ATOMIC_MMU_IDX); \ | ||
791 | atomic_trace_rmw_pre(env, addr, info); \ | ||
792 | - ret = atomic_##X(haddr, val); \ | ||
793 | + ret = qatomic_##X(haddr, val); \ | ||
794 | ATOMIC_MMU_CLEANUP; \ | ||
795 | atomic_trace_rmw_post(env, addr, info); \ | ||
796 | return ret; \ | ||
797 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ | ||
798 | ATOMIC_MMU_IDX); \ | ||
799 | atomic_trace_rmw_pre(env, addr, info); \ | ||
800 | smp_mb(); \ | ||
801 | - cmp = atomic_read__nocheck(haddr); \ | ||
802 | + cmp = qatomic_read__nocheck(haddr); \ | ||
803 | do { \ | ||
804 | old = cmp; new = FN(old, val); \ | ||
805 | - cmp = atomic_cmpxchg__nocheck(haddr, old, new); \ | ||
806 | + cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \ | ||
807 | } while (cmp != old); \ | ||
808 | ATOMIC_MMU_CLEANUP; \ | ||
809 | atomic_trace_rmw_post(env, addr, info); \ | ||
810 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, | ||
811 | #if DATA_SIZE == 16 | ||
812 | ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); | ||
813 | #else | ||
814 | - ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); | ||
815 | + ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); | ||
816 | #endif | ||
817 | ATOMIC_MMU_CLEANUP; | ||
818 | atomic_trace_rmw_post(env, addr, info); | ||
819 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, | ||
820 | ATOMIC_MMU_IDX); | ||
821 | |||
822 | atomic_trace_rmw_pre(env, addr, info); | ||
823 | - ret = atomic_xchg__nocheck(haddr, BSWAP(val)); | ||
824 | + ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); | ||
825 | ATOMIC_MMU_CLEANUP; | ||
826 | atomic_trace_rmw_post(env, addr, info); | ||
827 | return BSWAP(ret); | ||
828 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ | ||
829 | uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \ | ||
830 | false, ATOMIC_MMU_IDX); \ | ||
831 | atomic_trace_rmw_pre(env, addr, info); \ | ||
832 | - ret = atomic_##X(haddr, BSWAP(val)); \ | ||
833 | + ret = qatomic_##X(haddr, BSWAP(val)); \ | ||
834 | ATOMIC_MMU_CLEANUP; \ | ||
835 | atomic_trace_rmw_post(env, addr, info); \ | ||
836 | return BSWAP(ret); \ | ||
837 | @@ -XXX,XX +XXX,XX @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ | ||
838 | false, ATOMIC_MMU_IDX); \ | ||
839 | atomic_trace_rmw_pre(env, addr, info); \ | ||
840 | smp_mb(); \ | ||
841 | - ldn = atomic_read__nocheck(haddr); \ | ||
842 | + ldn = qatomic_read__nocheck(haddr); \ | ||
843 | do { \ | ||
844 | ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \ | ||
845 | - ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ | ||
846 | + ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ | ||
847 | } while (ldo != ldn); \ | ||
848 | ATOMIC_MMU_CLEANUP; \ | ||
849 | atomic_trace_rmw_post(env, addr, info); \ | ||
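The read-modify-write helpers above all follow the same compare-and-swap shape. As a standalone illustration of that loop (not part of the patch, and independent of the TCG template; `fetch_add_u32` is a hypothetical helper, since qatomic_fetch_add() already covers this case):

    /* Atomically add "val" to *p and return the previous value. */
    static uint32_t fetch_add_u32(uint32_t *p, uint32_t val)
    {
        uint32_t old, cmp = qatomic_read(p);

        do {
            old = cmp;
            cmp = qatomic_cmpxchg(p, old, old + val);
        } while (cmp != old);

        return old;
    }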
850 | diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h | ||
851 | index XXXXXXX..XXXXXXX 100644 | ||
852 | --- a/include/block/aio-wait.h | ||
853 | +++ b/include/block/aio-wait.h | ||
854 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
855 | AioWait *wait_ = &global_aio_wait; \ | ||
856 | AioContext *ctx_ = (ctx); \ | ||
857 | /* Increment wait_->num_waiters before evaluating cond. */ \ | ||
858 | - atomic_inc(&wait_->num_waiters); \ | ||
859 | + qatomic_inc(&wait_->num_waiters); \ | ||
860 | if (ctx_ && in_aio_context_home_thread(ctx_)) { \ | ||
861 | while ((cond)) { \ | ||
862 | aio_poll(ctx_, true); \ | ||
863 | @@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait; | ||
864 | waited_ = true; \ | ||
865 | } \ | ||
866 | } \ | ||
867 | - atomic_dec(&wait_->num_waiters); \ | ||
868 | + qatomic_dec(&wait_->num_waiters); \ | ||
869 | waited_; }) | ||
870 | |||
871 | /** | ||
872 | diff --git a/include/block/aio.h b/include/block/aio.h | ||
873 | index XXXXXXX..XXXXXXX 100644 | ||
874 | --- a/include/block/aio.h | ||
875 | +++ b/include/block/aio.h | ||
876 | @@ -XXX,XX +XXX,XX @@ int64_t aio_compute_timeout(AioContext *ctx); | ||
877 | */ | ||
878 | static inline void aio_disable_external(AioContext *ctx) | ||
879 | { | ||
880 | - atomic_inc(&ctx->external_disable_cnt); | ||
881 | + qatomic_inc(&ctx->external_disable_cnt); | ||
882 | } | ||
883 | |||
884 | /** | ||
885 | @@ -XXX,XX +XXX,XX @@ static inline void aio_enable_external(AioContext *ctx) | ||
886 | { | ||
887 | int old; | ||
888 | |||
889 | - old = atomic_fetch_dec(&ctx->external_disable_cnt); | ||
890 | + old = qatomic_fetch_dec(&ctx->external_disable_cnt); | ||
891 | assert(old > 0); | ||
892 | if (old == 1) { | ||
893 | /* Kick event loop so it re-arms file descriptors */ | ||
894 | @@ -XXX,XX +XXX,XX @@ static inline void aio_enable_external(AioContext *ctx) | ||
895 | */ | ||
896 | static inline bool aio_external_disabled(AioContext *ctx) | ||
897 | { | ||
898 | - return atomic_read(&ctx->external_disable_cnt); | ||
899 | + return qatomic_read(&ctx->external_disable_cnt); | ||
900 | } | ||
901 | |||
902 | /** | ||
903 | @@ -XXX,XX +XXX,XX @@ static inline bool aio_external_disabled(AioContext *ctx) | ||
904 | */ | ||
905 | static inline bool aio_node_check(AioContext *ctx, bool is_external) | ||
906 | { | ||
907 | - return !is_external || !atomic_read(&ctx->external_disable_cnt); | ||
908 | + return !is_external || !qatomic_read(&ctx->external_disable_cnt); | ||
909 | } | ||
910 | |||
911 | /** | ||
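For context, the counter updated above is used in matched pairs by callers, along the lines of this sketch (not part of the patch; `ctx` is an existing `AioContext *`):

    /* Stop dispatching "external" handlers (e.g. guest-initiated I/O)
     * while a sensitive operation runs, then re-enable them.  The final
     * aio_enable_external() kicks the event loop so file descriptors
     * are re-armed, as noted in the hunk above. */
    aio_disable_external(ctx);
    /* ... perform the operation ... */
    aio_enable_external(ctx);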
912 | diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h | ||
913 | index XXXXXXX..XXXXXXX 100644 | ||
914 | --- a/include/exec/cpu_ldst.h | ||
915 | +++ b/include/exec/cpu_ldst.h | ||
916 | @@ -XXX,XX +XXX,XX @@ static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry) | ||
917 | #if TCG_OVERSIZED_GUEST | ||
918 | return entry->addr_write; | ||
919 | #else | ||
920 | - return atomic_read(&entry->addr_write); | ||
921 | + return qatomic_read(&entry->addr_write); | ||
922 | #endif | ||
923 | } | ||
924 | |||
925 | diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h | ||
926 | index XXXXXXX..XXXXXXX 100644 | ||
927 | --- a/include/exec/exec-all.h | ||
928 | +++ b/include/exec/exec-all.h | ||
929 | @@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); | ||
930 | */ | ||
931 | static inline bool cpu_loop_exit_requested(CPUState *cpu) | ||
932 | { | ||
933 | - return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0; | ||
934 | + return (int32_t)qatomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0; | ||
935 | } | ||
936 | |||
937 | #if !defined(CONFIG_USER_ONLY) | ||
938 | @@ -XXX,XX +XXX,XX @@ struct TranslationBlock { | ||
939 | |||
940 | extern bool parallel_cpus; | ||
941 | |||
942 | -/* Hide the atomic_read to make code a little easier on the eyes */ | ||
943 | +/* Hide the qatomic_read to make code a little easier on the eyes */ | ||
944 | static inline uint32_t tb_cflags(const TranslationBlock *tb) | ||
945 | { | ||
946 | - return atomic_read(&tb->cflags); | ||
947 | + return qatomic_read(&tb->cflags); | ||
948 | } | ||
949 | |||
950 | /* current cflags for hashing/comparison */ | ||
951 | diff --git a/include/exec/log.h b/include/exec/log.h | ||
952 | index XXXXXXX..XXXXXXX 100644 | ||
953 | --- a/include/exec/log.h | ||
954 | +++ b/include/exec/log.h | ||
955 | @@ -XXX,XX +XXX,XX @@ static inline void log_cpu_state(CPUState *cpu, int flags) | ||
956 | |||
957 | if (qemu_log_enabled()) { | ||
958 | rcu_read_lock(); | ||
959 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
960 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
961 | if (logfile) { | ||
962 | cpu_dump_state(cpu, logfile->fd, flags); | ||
963 | } | ||
964 | @@ -XXX,XX +XXX,XX @@ static inline void log_target_disas(CPUState *cpu, target_ulong start, | ||
965 | { | ||
966 | QemuLogFile *logfile; | ||
967 | rcu_read_lock(); | ||
968 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
969 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
970 | if (logfile) { | ||
971 | target_disas(logfile->fd, cpu, start, len); | ||
972 | } | ||
973 | @@ -XXX,XX +XXX,XX @@ static inline void log_disas(void *code, unsigned long size, const char *note) | ||
974 | { | ||
975 | QemuLogFile *logfile; | ||
976 | rcu_read_lock(); | ||
977 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
978 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
979 | if (logfile) { | ||
980 | disas(logfile->fd, code, size, note); | ||
981 | } | ||
982 | diff --git a/include/exec/memory.h b/include/exec/memory.h | ||
983 | index XXXXXXX..XXXXXXX 100644 | ||
984 | --- a/include/exec/memory.h | ||
985 | +++ b/include/exec/memory.h | ||
986 | @@ -XXX,XX +XXX,XX @@ struct FlatView { | ||
987 | |||
988 | static inline FlatView *address_space_to_flatview(AddressSpace *as) | ||
989 | { | ||
990 | - return atomic_rcu_read(&as->current_map); | ||
991 | + return qatomic_rcu_read(&as->current_map); | ||
992 | } | ||
993 | |||
994 | |||
995 | diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h | ||
996 | index XXXXXXX..XXXXXXX 100644 | ||
997 | --- a/include/exec/ram_addr.h | ||
998 | +++ b/include/exec/ram_addr.h | ||
999 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start, | ||
1000 | page = start >> TARGET_PAGE_BITS; | ||
1001 | |||
1002 | WITH_RCU_READ_LOCK_GUARD() { | ||
1003 | - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | ||
1004 | + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); | ||
1005 | |||
1006 | idx = page / DIRTY_MEMORY_BLOCK_SIZE; | ||
1007 | offset = page % DIRTY_MEMORY_BLOCK_SIZE; | ||
1008 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start, | ||
1009 | |||
1010 | RCU_READ_LOCK_GUARD(); | ||
1011 | |||
1012 | - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | ||
1013 | + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); | ||
1014 | |||
1015 | idx = page / DIRTY_MEMORY_BLOCK_SIZE; | ||
1016 | offset = page % DIRTY_MEMORY_BLOCK_SIZE; | ||
1017 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, | ||
1018 | |||
1019 | RCU_READ_LOCK_GUARD(); | ||
1020 | |||
1021 | - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | ||
1022 | + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); | ||
1023 | |||
1024 | set_bit_atomic(offset, blocks->blocks[idx]); | ||
1025 | } | ||
1026 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, | ||
1027 | |||
1028 | WITH_RCU_READ_LOCK_GUARD() { | ||
1029 | for (i = 0; i < DIRTY_MEMORY_NUM; i++) { | ||
1030 | - blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]); | ||
1031 | + blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]); | ||
1032 | } | ||
1033 | |||
1034 | idx = page / DIRTY_MEMORY_BLOCK_SIZE; | ||
1035 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, | ||
1036 | |||
1037 | WITH_RCU_READ_LOCK_GUARD() { | ||
1038 | for (i = 0; i < DIRTY_MEMORY_NUM; i++) { | ||
1039 | - blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks; | ||
1040 | + blocks[i] = | ||
1041 | + qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks; | ||
1042 | } | ||
1043 | |||
1044 | for (k = 0; k < nr; k++) { | ||
1045 | if (bitmap[k]) { | ||
1046 | unsigned long temp = leul_to_cpu(bitmap[k]); | ||
1047 | |||
1048 | - atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp); | ||
1049 | + qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp); | ||
1050 | |||
1051 | if (global_dirty_log) { | ||
1052 | - atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], | ||
1053 | - temp); | ||
1054 | + qatomic_or( | ||
1055 | + &blocks[DIRTY_MEMORY_MIGRATION][idx][offset], | ||
1056 | + temp); | ||
1057 | } | ||
1058 | |||
1059 | if (tcg_enabled()) { | ||
1060 | - atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], | ||
1061 | - temp); | ||
1062 | + qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], | ||
1063 | + temp); | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1067 | @@ -XXX,XX +XXX,XX @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, | ||
1068 | DIRTY_MEMORY_BLOCK_SIZE); | ||
1069 | unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); | ||
1070 | |||
1071 | - src = atomic_rcu_read( | ||
1072 | + src = qatomic_rcu_read( | ||
1073 | &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; | ||
1074 | |||
1075 | for (k = page; k < page + nr; k++) { | ||
1076 | if (src[idx][offset]) { | ||
1077 | - unsigned long bits = atomic_xchg(&src[idx][offset], 0); | ||
1078 | + unsigned long bits = qatomic_xchg(&src[idx][offset], 0); | ||
1079 | unsigned long new_dirty; | ||
1080 | new_dirty = ~dest[k]; | ||
1081 | dest[k] |= bits; | ||
1082 | diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h | ||
1083 | index XXXXXXX..XXXXXXX 100644 | ||
1084 | --- a/include/exec/ramlist.h | ||
1085 | +++ b/include/exec/ramlist.h | ||
1086 | @@ -XXX,XX +XXX,XX @@ typedef struct RAMBlockNotifier RAMBlockNotifier; | ||
1087 | * rcu_read_lock(); | ||
1088 | * | ||
1089 | * DirtyMemoryBlocks *blocks = | ||
1090 | - * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]); | ||
1091 | + * qatomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]); | ||
1092 | * | ||
1093 | * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE; | ||
1094 | * unsigned long *block = blocks.blocks[idx]; | ||
1095 | diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h | ||
1096 | index XXXXXXX..XXXXXXX 100644 | ||
1097 | --- a/include/exec/tb-lookup.h | ||
1098 | +++ b/include/exec/tb-lookup.h | ||
1099 | @@ -XXX,XX +XXX,XX @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base, | ||
1100 | |||
1101 | cpu_get_tb_cpu_state(env, pc, cs_base, flags); | ||
1102 | hash = tb_jmp_cache_hash_func(*pc); | ||
1103 | - tb = atomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
1104 | + tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]); | ||
1105 | |||
1106 | cf_mask &= ~CF_CLUSTER_MASK; | ||
1107 | cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT; | ||
1108 | @@ -XXX,XX +XXX,XX @@ tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base, | ||
1109 | if (tb == NULL) { | ||
1110 | return NULL; | ||
1111 | } | ||
1112 | - atomic_set(&cpu->tb_jmp_cache[hash], tb); | ||
1113 | + qatomic_set(&cpu->tb_jmp_cache[hash], tb); | ||
1114 | return tb; | ||
1115 | } | ||
1116 | |||
1117 | diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h | ||
1118 | index XXXXXXX..XXXXXXX 100644 | ||
1119 | --- a/include/hw/core/cpu.h | ||
1120 | +++ b/include/hw/core/cpu.h | ||
1121 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_tb_jmp_cache_clear(CPUState *cpu) | ||
1122 | unsigned int i; | ||
1123 | |||
1124 | for (i = 0; i < TB_JMP_CACHE_SIZE; i++) { | ||
1125 | - atomic_set(&cpu->tb_jmp_cache[i], NULL); | ||
1126 | + qatomic_set(&cpu->tb_jmp_cache[i], NULL); | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1130 | diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h | ||
1131 | index XXXXXXX..XXXXXXX 100644 | ||
1132 | --- a/include/qemu/atomic128.h | ||
1133 | +++ b/include/qemu/atomic128.h | ||
1134 | @@ -XXX,XX +XXX,XX @@ | ||
1135 | #if defined(CONFIG_ATOMIC128) | ||
1136 | static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) | ||
1137 | { | ||
1138 | - return atomic_cmpxchg__nocheck(ptr, cmp, new); | ||
1139 | + return qatomic_cmpxchg__nocheck(ptr, cmp, new); | ||
1140 | } | ||
1141 | # define HAVE_CMPXCHG128 1 | ||
1142 | #elif defined(CONFIG_CMPXCHG128) | ||
1143 | @@ -XXX,XX +XXX,XX @@ Int128 QEMU_ERROR("unsupported atomic") | ||
1144 | #if defined(CONFIG_ATOMIC128) | ||
1145 | static inline Int128 atomic16_read(Int128 *ptr) | ||
1146 | { | ||
1147 | - return atomic_read__nocheck(ptr); | ||
1148 | + return qatomic_read__nocheck(ptr); | ||
1149 | } | ||
1150 | |||
1151 | static inline void atomic16_set(Int128 *ptr, Int128 val) | ||
1152 | { | ||
1153 | - atomic_set__nocheck(ptr, val); | ||
1154 | + qatomic_set__nocheck(ptr, val); | ||
1155 | } | ||
1156 | |||
1157 | # define HAVE_ATOMIC128 1 | ||
1158 | diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h | ||
1159 | index XXXXXXX..XXXXXXX 100644 | ||
1160 | --- a/include/qemu/bitops.h | ||
1161 | +++ b/include/qemu/bitops.h | ||
1162 | @@ -XXX,XX +XXX,XX @@ static inline void set_bit_atomic(long nr, unsigned long *addr) | ||
1163 | unsigned long mask = BIT_MASK(nr); | ||
1164 | unsigned long *p = addr + BIT_WORD(nr); | ||
1165 | |||
1166 | - atomic_or(p, mask); | ||
1167 | + qatomic_or(p, mask); | ||
1168 | } | ||
1169 | |||
1170 | /** | ||
1171 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h | ||
1172 | index XXXXXXX..XXXXXXX 100644 | ||
1173 | --- a/include/qemu/coroutine.h | ||
1174 | +++ b/include/qemu/coroutine.h | ||
1175 | @@ -XXX,XX +XXX,XX @@ static inline coroutine_fn void qemu_co_mutex_assert_locked(CoMutex *mutex) | ||
1176 | * because the condition will be false no matter whether we read NULL or | ||
1177 | * the pointer for any other coroutine. | ||
1178 | */ | ||
1179 | - assert(atomic_read(&mutex->locked) && | ||
1180 | + assert(qatomic_read(&mutex->locked) && | ||
1181 | mutex->holder == qemu_coroutine_self()); | ||
1182 | } | ||
1183 | |||
1184 | diff --git a/include/qemu/log.h b/include/qemu/log.h | ||
1185 | index XXXXXXX..XXXXXXX 100644 | ||
1186 | --- a/include/qemu/log.h | ||
1187 | +++ b/include/qemu/log.h | ||
1188 | @@ -XXX,XX +XXX,XX @@ static inline bool qemu_log_separate(void) | ||
1189 | bool res = false; | ||
1190 | |||
1191 | rcu_read_lock(); | ||
1192 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
1193 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
1194 | if (logfile && logfile->fd != stderr) { | ||
1195 | res = true; | ||
1196 | } | ||
1197 | @@ -XXX,XX +XXX,XX @@ static inline FILE *qemu_log_lock(void) | ||
1198 | { | ||
1199 | QemuLogFile *logfile; | ||
1200 | rcu_read_lock(); | ||
1201 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
1202 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
1203 | if (logfile) { | ||
1204 | qemu_flockfile(logfile->fd); | ||
1205 | return logfile->fd; | ||
1206 | @@ -XXX,XX +XXX,XX @@ qemu_log_vprintf(const char *fmt, va_list va) | ||
1207 | QemuLogFile *logfile; | ||
1208 | |||
1209 | rcu_read_lock(); | ||
1210 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
1211 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
1212 | if (logfile) { | ||
1213 | vfprintf(logfile->fd, fmt, va); | ||
1214 | } | ||
1215 | diff --git a/include/qemu/queue.h b/include/qemu/queue.h | ||
1216 | index XXXXXXX..XXXXXXX 100644 | ||
1217 | --- a/include/qemu/queue.h | ||
1218 | +++ b/include/qemu/queue.h | ||
1219 | @@ -XXX,XX +XXX,XX @@ struct { \ | ||
1220 | typeof(elm) save_sle_next; \ | ||
1221 | do { \ | ||
1222 | save_sle_next = (elm)->field.sle_next = (head)->slh_first; \ | ||
1223 | - } while (atomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) != \ | ||
1224 | + } while (qatomic_cmpxchg(&(head)->slh_first, save_sle_next, (elm)) !=\ | ||
1225 | save_sle_next); \ | ||
1226 | } while (/*CONSTCOND*/0) | ||
1227 | |||
1228 | #define QSLIST_MOVE_ATOMIC(dest, src) do { \ | ||
1229 | - (dest)->slh_first = atomic_xchg(&(src)->slh_first, NULL); \ | ||
1230 | + (dest)->slh_first = qatomic_xchg(&(src)->slh_first, NULL); \ | ||
1231 | } while (/*CONSTCOND*/0) | ||
1232 | |||
1233 | #define QSLIST_REMOVE_HEAD(head, field) do { \ | ||
1234 | @@ -XXX,XX +XXX,XX @@ struct { \ | ||
1235 | /* | ||
1236 | * Simple queue access methods. | ||
1237 | */ | ||
1238 | -#define QSIMPLEQ_EMPTY_ATOMIC(head) (atomic_read(&((head)->sqh_first)) == NULL) | ||
1239 | +#define QSIMPLEQ_EMPTY_ATOMIC(head) \ | ||
1240 | + (qatomic_read(&((head)->sqh_first)) == NULL) | ||
1241 | #define QSIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) | ||
1242 | #define QSIMPLEQ_FIRST(head) ((head)->sqh_first) | ||
1243 | #define QSIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) | ||
1244 | diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h | ||
1245 | index XXXXXXX..XXXXXXX 100644 | ||
1246 | --- a/include/qemu/rcu.h | ||
1247 | +++ b/include/qemu/rcu.h | ||
1248 | @@ -XXX,XX +XXX,XX @@ static inline void rcu_read_lock(void) | ||
1249 | return; | ||
1250 | } | ||
1251 | |||
1252 | - ctr = atomic_read(&rcu_gp_ctr); | ||
1253 | - atomic_set(&p_rcu_reader->ctr, ctr); | ||
1254 | + ctr = qatomic_read(&rcu_gp_ctr); | ||
1255 | + qatomic_set(&p_rcu_reader->ctr, ctr); | ||
1256 | |||
1257 | /* Write p_rcu_reader->ctr before reading RCU-protected pointers. */ | ||
1258 | smp_mb_placeholder(); | ||
1259 | @@ -XXX,XX +XXX,XX @@ static inline void rcu_read_unlock(void) | ||
1260 | * smp_mb_placeholder(), this ensures writes to p_rcu_reader->ctr | ||
1261 | * are sequentially consistent. | ||
1262 | */ | ||
1263 | - atomic_store_release(&p_rcu_reader->ctr, 0); | ||
1264 | + qatomic_store_release(&p_rcu_reader->ctr, 0); | ||
1265 | |||
1266 | /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting. */ | ||
1267 | smp_mb_placeholder(); | ||
1268 | - if (unlikely(atomic_read(&p_rcu_reader->waiting))) { | ||
1269 | - atomic_set(&p_rcu_reader->waiting, false); | ||
1270 | + if (unlikely(qatomic_read(&p_rcu_reader->waiting))) { | ||
1271 | + qatomic_set(&p_rcu_reader->waiting, false); | ||
1272 | qemu_event_set(&rcu_gp_event); | ||
1273 | } | ||
1274 | } | ||
1275 | diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h | ||
1276 | index XXXXXXX..XXXXXXX 100644 | ||
1277 | --- a/include/qemu/rcu_queue.h | ||
1278 | +++ b/include/qemu/rcu_queue.h | ||
1279 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1280 | /* | ||
1281 | * List access methods. | ||
1282 | */ | ||
1283 | -#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL) | ||
1284 | -#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first)) | ||
1285 | -#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next)) | ||
1286 | +#define QLIST_EMPTY_RCU(head) (qatomic_read(&(head)->lh_first) == NULL) | ||
1287 | +#define QLIST_FIRST_RCU(head) (qatomic_rcu_read(&(head)->lh_first)) | ||
1288 | +#define QLIST_NEXT_RCU(elm, field) (qatomic_rcu_read(&(elm)->field.le_next)) | ||
1289 | |||
1290 | /* | ||
1291 | * List functions. | ||
1292 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1293 | |||
1294 | |||
1295 | /* | ||
1296 | - * The difference between atomic_read/set and atomic_rcu_read/set | ||
1297 | + * The difference between qatomic_read/set and qatomic_rcu_read/set | ||
1298 | * is in the including of a read/write memory barrier to the volatile | ||
1299 | * access. atomic_rcu_* macros include the memory barrier, the | ||
1300 | * plain atomic macros do not. Therefore, it should be correct to | ||
1301 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1302 | #define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \ | ||
1303 | (elm)->field.le_next = (listelm)->field.le_next; \ | ||
1304 | (elm)->field.le_prev = &(listelm)->field.le_next; \ | ||
1305 | - atomic_rcu_set(&(listelm)->field.le_next, (elm)); \ | ||
1306 | + qatomic_rcu_set(&(listelm)->field.le_next, (elm)); \ | ||
1307 | if ((elm)->field.le_next != NULL) { \ | ||
1308 | (elm)->field.le_next->field.le_prev = \ | ||
1309 | &(elm)->field.le_next; \ | ||
1310 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1311 | #define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \ | ||
1312 | (elm)->field.le_prev = (listelm)->field.le_prev; \ | ||
1313 | (elm)->field.le_next = (listelm); \ | ||
1314 | - atomic_rcu_set((listelm)->field.le_prev, (elm)); \ | ||
1315 | + qatomic_rcu_set((listelm)->field.le_prev, (elm)); \ | ||
1316 | (listelm)->field.le_prev = &(elm)->field.le_next; \ | ||
1317 | } while (/*CONSTCOND*/0) | ||
1318 | |||
1319 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1320 | #define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \ | ||
1321 | (elm)->field.le_prev = &(head)->lh_first; \ | ||
1322 | (elm)->field.le_next = (head)->lh_first; \ | ||
1323 | - atomic_rcu_set((&(head)->lh_first), (elm)); \ | ||
1324 | + qatomic_rcu_set((&(head)->lh_first), (elm)); \ | ||
1325 | if ((elm)->field.le_next != NULL) { \ | ||
1326 | (elm)->field.le_next->field.le_prev = \ | ||
1327 | &(elm)->field.le_next; \ | ||
1328 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1329 | (elm)->field.le_next->field.le_prev = \ | ||
1330 | (elm)->field.le_prev; \ | ||
1331 | } \ | ||
1332 | - atomic_set((elm)->field.le_prev, (elm)->field.le_next); \ | ||
1333 | + qatomic_set((elm)->field.le_prev, (elm)->field.le_next); \ | ||
1334 | } while (/*CONSTCOND*/0) | ||
1335 | |||
1336 | /* List traversal must occur within an RCU critical section. */ | ||
1337 | #define QLIST_FOREACH_RCU(var, head, field) \ | ||
1338 | - for ((var) = atomic_rcu_read(&(head)->lh_first); \ | ||
1339 | + for ((var) = qatomic_rcu_read(&(head)->lh_first); \ | ||
1340 | (var); \ | ||
1341 | - (var) = atomic_rcu_read(&(var)->field.le_next)) | ||
1342 | + (var) = qatomic_rcu_read(&(var)->field.le_next)) | ||
1343 | |||
1344 | /* List traversal must occur within an RCU critical section. */ | ||
1345 | #define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \ | ||
1346 | - for ((var) = (atomic_rcu_read(&(head)->lh_first)); \ | ||
1347 | + for ((var) = (qatomic_rcu_read(&(head)->lh_first)); \ | ||
1348 | (var) && \ | ||
1349 | - ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \ | ||
1350 | + ((next_var) = qatomic_rcu_read(&(var)->field.le_next), 1); \ | ||
1351 | (var) = (next_var)) | ||
1352 | |||
1353 | /* | ||
1354 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1355 | */ | ||
1356 | |||
1357 | /* Simple queue access methods */ | ||
1358 | -#define QSIMPLEQ_EMPTY_RCU(head) (atomic_read(&(head)->sqh_first) == NULL) | ||
1359 | -#define QSIMPLEQ_FIRST_RCU(head) atomic_rcu_read(&(head)->sqh_first) | ||
1360 | -#define QSIMPLEQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sqe_next) | ||
1361 | +#define QSIMPLEQ_EMPTY_RCU(head) \ | ||
1362 | + (qatomic_read(&(head)->sqh_first) == NULL) | ||
1363 | +#define QSIMPLEQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->sqh_first) | ||
1364 | +#define QSIMPLEQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sqe_next) | ||
1365 | |||
1366 | /* Simple queue functions */ | ||
1367 | #define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do { \ | ||
1368 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1369 | if ((elm)->field.sqe_next == NULL) { \ | ||
1370 | (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1371 | } \ | ||
1372 | - atomic_rcu_set(&(head)->sqh_first, (elm)); \ | ||
1373 | + qatomic_rcu_set(&(head)->sqh_first, (elm)); \ | ||
1374 | } while (/*CONSTCOND*/0) | ||
1375 | |||
1376 | #define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do { \ | ||
1377 | (elm)->field.sqe_next = NULL; \ | ||
1378 | - atomic_rcu_set((head)->sqh_last, (elm)); \ | ||
1379 | + qatomic_rcu_set((head)->sqh_last, (elm)); \ | ||
1380 | (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1381 | } while (/*CONSTCOND*/0) | ||
1382 | |||
1383 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1384 | if ((elm)->field.sqe_next == NULL) { \ | ||
1385 | (head)->sqh_last = &(elm)->field.sqe_next; \ | ||
1386 | } \ | ||
1387 | - atomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \ | ||
1388 | + qatomic_rcu_set(&(listelm)->field.sqe_next, (elm)); \ | ||
1389 | } while (/*CONSTCOND*/0) | ||
1390 | |||
1391 | #define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do { \ | ||
1392 | - atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \ | ||
1393 | + qatomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next);\ | ||
1394 | if ((head)->sqh_first == NULL) { \ | ||
1395 | (head)->sqh_last = &(head)->sqh_first; \ | ||
1396 | } \ | ||
1397 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1398 | while (curr->field.sqe_next != (elm)) { \ | ||
1399 | curr = curr->field.sqe_next; \ | ||
1400 | } \ | ||
1401 | - atomic_set(&curr->field.sqe_next, \ | ||
1402 | + qatomic_set(&curr->field.sqe_next, \ | ||
1403 | curr->field.sqe_next->field.sqe_next); \ | ||
1404 | if (curr->field.sqe_next == NULL) { \ | ||
1405 | (head)->sqh_last = &(curr)->field.sqe_next; \ | ||
1406 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1407 | } while (/*CONSTCOND*/0) | ||
1408 | |||
1409 | #define QSIMPLEQ_FOREACH_RCU(var, head, field) \ | ||
1410 | - for ((var) = atomic_rcu_read(&(head)->sqh_first); \ | ||
1411 | + for ((var) = qatomic_rcu_read(&(head)->sqh_first); \ | ||
1412 | (var); \ | ||
1413 | - (var) = atomic_rcu_read(&(var)->field.sqe_next)) | ||
1414 | + (var) = qatomic_rcu_read(&(var)->field.sqe_next)) | ||
1415 | |||
1416 | #define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next) \ | ||
1417 | - for ((var) = atomic_rcu_read(&(head)->sqh_first); \ | ||
1418 | - (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \ | ||
1419 | + for ((var) = qatomic_rcu_read(&(head)->sqh_first); \ | ||
1420 | + (var) && ((next) = qatomic_rcu_read(&(var)->field.sqe_next), 1);\ | ||
1421 | (var) = (next)) | ||
1422 | |||
1423 | /* | ||
1424 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1425 | */ | ||
1426 | |||
1427 | /* Tail queue access methods */ | ||
1428 | -#define QTAILQ_EMPTY_RCU(head) (atomic_read(&(head)->tqh_first) == NULL) | ||
1429 | -#define QTAILQ_FIRST_RCU(head) atomic_rcu_read(&(head)->tqh_first) | ||
1430 | -#define QTAILQ_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.tqe_next) | ||
1431 | +#define QTAILQ_EMPTY_RCU(head) (qatomic_read(&(head)->tqh_first) == NULL) | ||
1432 | +#define QTAILQ_FIRST_RCU(head) qatomic_rcu_read(&(head)->tqh_first) | ||
1433 | +#define QTAILQ_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.tqe_next) | ||
1434 | |||
1435 | /* Tail queue functions */ | ||
1436 | #define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do { \ | ||
1437 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1438 | } else { \ | ||
1439 | (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ | ||
1440 | } \ | ||
1441 | - atomic_rcu_set(&(head)->tqh_first, (elm)); \ | ||
1442 | + qatomic_rcu_set(&(head)->tqh_first, (elm)); \ | ||
1443 | (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ; \ | ||
1444 | } while (/*CONSTCOND*/0) | ||
1445 | |||
1446 | #define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do { \ | ||
1447 | (elm)->field.tqe_next = NULL; \ | ||
1448 | (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev; \ | ||
1449 | - atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \ | ||
1450 | + qatomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \ | ||
1451 | (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ | ||
1452 | } while (/*CONSTCOND*/0) | ||
1453 | |||
1454 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1455 | } else { \ | ||
1456 | (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ; \ | ||
1457 | } \ | ||
1458 | - atomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \ | ||
1459 | + qatomic_rcu_set(&(listelm)->field.tqe_next, (elm)); \ | ||
1460 | (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \ | ||
1461 | } while (/*CONSTCOND*/0) | ||
1462 | |||
1463 | #define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do { \ | ||
1464 | (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev; \ | ||
1465 | (elm)->field.tqe_next = (listelm); \ | ||
1466 | - atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \ | ||
1467 | + qatomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm));\ | ||
1468 | (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ; \ | ||
1469 | } while (/*CONSTCOND*/0) | ||
1470 | |||
1471 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1472 | } else { \ | ||
1473 | (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \ | ||
1474 | } \ | ||
1475 | - atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, (elm)->field.tqe_next); \ | ||
1476 | + qatomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next, \ | ||
1477 | + (elm)->field.tqe_next); \ | ||
1478 | (elm)->field.tqe_circ.tql_prev = NULL; \ | ||
1479 | } while (/*CONSTCOND*/0) | ||
1480 | |||
1481 | #define QTAILQ_FOREACH_RCU(var, head, field) \ | ||
1482 | - for ((var) = atomic_rcu_read(&(head)->tqh_first); \ | ||
1483 | + for ((var) = qatomic_rcu_read(&(head)->tqh_first); \ | ||
1484 | (var); \ | ||
1485 | - (var) = atomic_rcu_read(&(var)->field.tqe_next)) | ||
1486 | + (var) = qatomic_rcu_read(&(var)->field.tqe_next)) | ||
1487 | |||
1488 | #define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next) \ | ||
1489 | - for ((var) = atomic_rcu_read(&(head)->tqh_first); \ | ||
1490 | - (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \ | ||
1491 | + for ((var) = qatomic_rcu_read(&(head)->tqh_first); \ | ||
1492 | + (var) && ((next) = qatomic_rcu_read(&(var)->field.tqe_next), 1);\ | ||
1493 | (var) = (next)) | ||
1494 | |||
1495 | /* | ||
1496 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1497 | */ | ||
1498 | |||
1499 | /* Singly-linked list access methods */ | ||
1500 | -#define QSLIST_EMPTY_RCU(head) (atomic_read(&(head)->slh_first) == NULL) | ||
1501 | -#define QSLIST_FIRST_RCU(head) atomic_rcu_read(&(head)->slh_first) | ||
1502 | -#define QSLIST_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sle_next) | ||
1503 | +#define QSLIST_EMPTY_RCU(head) (qatomic_read(&(head)->slh_first) == NULL) | ||
1504 | +#define QSLIST_FIRST_RCU(head) qatomic_rcu_read(&(head)->slh_first) | ||
1505 | +#define QSLIST_NEXT_RCU(elm, field) qatomic_rcu_read(&(elm)->field.sle_next) | ||
1506 | |||
1507 | /* Singly-linked list functions */ | ||
1508 | #define QSLIST_INSERT_HEAD_RCU(head, elm, field) do { \ | ||
1509 | (elm)->field.sle_next = (head)->slh_first; \ | ||
1510 | - atomic_rcu_set(&(head)->slh_first, (elm)); \ | ||
1511 | + qatomic_rcu_set(&(head)->slh_first, (elm)); \ | ||
1512 | } while (/*CONSTCOND*/0) | ||
1513 | |||
1514 | #define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \ | ||
1515 | (elm)->field.sle_next = (listelm)->field.sle_next; \ | ||
1516 | - atomic_rcu_set(&(listelm)->field.sle_next, (elm)); \ | ||
1517 | + qatomic_rcu_set(&(listelm)->field.sle_next, (elm)); \ | ||
1518 | } while (/*CONSTCOND*/0) | ||
1519 | |||
1520 | #define QSLIST_REMOVE_HEAD_RCU(head, field) do { \ | ||
1521 | - atomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next); \ | ||
1522 | + qatomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next);\ | ||
1523 | } while (/*CONSTCOND*/0) | ||
1524 | |||
1525 | #define QSLIST_REMOVE_RCU(head, elm, type, field) do { \ | ||
1526 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
1527 | while (curr->field.sle_next != (elm)) { \ | ||
1528 | curr = curr->field.sle_next; \ | ||
1529 | } \ | ||
1530 | - atomic_set(&curr->field.sle_next, \ | ||
1531 | + qatomic_set(&curr->field.sle_next, \ | ||
1532 | curr->field.sle_next->field.sle_next); \ | ||
1533 | } \ | ||
1534 | } while (/*CONSTCOND*/0) | ||
1535 | |||
1536 | #define QSLIST_FOREACH_RCU(var, head, field) \ | ||
1537 | - for ((var) = atomic_rcu_read(&(head)->slh_first); \ | ||
1538 | - (var); \ | ||
1539 | - (var) = atomic_rcu_read(&(var)->field.sle_next)) | ||
1540 | + for ((var) = qatomic_rcu_read(&(head)->slh_first); \ | ||
1541 | + (var); \ | ||
1542 | + (var) = qatomic_rcu_read(&(var)->field.sle_next)) | ||
1543 | |||
1544 | -#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \ | ||
1545 | - for ((var) = atomic_rcu_read(&(head)->slh_first); \ | ||
1546 | - (var) && ((next) = atomic_rcu_read(&(var)->field.sle_next), 1); \ | ||
1547 | +#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \ | ||
1548 | + for ((var) = qatomic_rcu_read(&(head)->slh_first); \ | ||
1549 | + (var) && ((next) = qatomic_rcu_read(&(var)->field.sle_next), 1); \ | ||
1550 | (var) = (next)) | ||
1551 | |||
1552 | #ifdef __cplusplus | ||
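As a usage sketch of the renamed RCU list macros above (not part of the patch; `Node`, `node_list` and the helpers are illustrative, and updaters are assumed to be serialized by a lock omitted here):

    typedef struct Node {
        int value;
        QLIST_ENTRY(Node) next;
    } Node;

    static QLIST_HEAD(, Node) node_list = QLIST_HEAD_INITIALIZER(node_list);

    static int sum_nodes(void)
    {
        Node *n;
        int sum = 0;

        /* Traversal must stay inside an RCU critical section. */
        rcu_read_lock();
        QLIST_FOREACH_RCU(n, &node_list, next) {
            sum += n->value;
        }
        rcu_read_unlock();
        return sum;
    }

    static void remove_node(Node *n)
    {
        QLIST_REMOVE_RCU(n, next);   /* unlinks with qatomic_set() internally */
        synchronize_rcu();           /* wait for readers still holding "n" */
        g_free(n);
    }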
1553 | diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h | ||
1554 | index XXXXXXX..XXXXXXX 100644 | ||
1555 | --- a/include/qemu/seqlock.h | ||
1556 | +++ b/include/qemu/seqlock.h | ||
1557 | @@ -XXX,XX +XXX,XX @@ static inline void seqlock_init(QemuSeqLock *sl) | ||
1558 | /* Lock out other writers and update the count. */ | ||
1559 | static inline void seqlock_write_begin(QemuSeqLock *sl) | ||
1560 | { | ||
1561 | - atomic_set(&sl->sequence, sl->sequence + 1); | ||
1562 | + qatomic_set(&sl->sequence, sl->sequence + 1); | ||
1563 | |||
1564 | /* Write sequence before updating other fields. */ | ||
1565 | smp_wmb(); | ||
1566 | @@ -XXX,XX +XXX,XX @@ static inline void seqlock_write_end(QemuSeqLock *sl) | ||
1567 | /* Write other fields before finalizing sequence. */ | ||
1568 | smp_wmb(); | ||
1569 | |||
1570 | - atomic_set(&sl->sequence, sl->sequence + 1); | ||
1571 | + qatomic_set(&sl->sequence, sl->sequence + 1); | ||
1572 | } | ||
1573 | |||
1574 | /* Lock out other writers and update the count. */ | ||
1575 | @@ -XXX,XX +XXX,XX @@ static inline void seqlock_write_unlock_impl(QemuSeqLock *sl, QemuLockable *lock | ||
1576 | static inline unsigned seqlock_read_begin(const QemuSeqLock *sl) | ||
1577 | { | ||
1578 | /* Always fail if a write is in progress. */ | ||
1579 | - unsigned ret = atomic_read(&sl->sequence); | ||
1580 | + unsigned ret = qatomic_read(&sl->sequence); | ||
1581 | |||
1582 | /* Read sequence before reading other fields. */ | ||
1583 | smp_rmb(); | ||
1584 | @@ -XXX,XX +XXX,XX @@ static inline int seqlock_read_retry(const QemuSeqLock *sl, unsigned start) | ||
1585 | { | ||
1586 | /* Read other fields before reading final sequence. */ | ||
1587 | smp_rmb(); | ||
1588 | - return unlikely(atomic_read(&sl->sequence) != start); | ||
1589 | + return unlikely(qatomic_read(&sl->sequence) != start); | ||
1590 | } | ||
1591 | |||
1592 | #endif | ||
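A usage sketch for the seqlock API above after the rename (not part of the patch; `shared_a`/`shared_b` are illustrative, `seqlock_init(&sl)` is assumed to run at setup, and writers are assumed to be serialized externally, e.g. by a mutex):

    static QemuSeqLock sl;
    static int64_t shared_a, shared_b;

    static void write_pair(int64_t a, int64_t b)
    {
        seqlock_write_begin(&sl);    /* bump sequence: readers will retry */
        shared_a = a;
        shared_b = b;
        seqlock_write_end(&sl);      /* bump again: reads are valid now */
    }

    static void read_pair(int64_t *a, int64_t *b)
    {
        unsigned start;

        do {
            start = seqlock_read_begin(&sl);
            *a = shared_a;
            *b = shared_b;
        } while (seqlock_read_retry(&sl, start));
    }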
1593 | diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h | ||
1594 | index XXXXXXX..XXXXXXX 100644 | ||
1595 | --- a/include/qemu/stats64.h | ||
1596 | +++ b/include/qemu/stats64.h | ||
1597 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_init(Stat64 *s, uint64_t value) | ||
1598 | |||
1599 | static inline uint64_t stat64_get(const Stat64 *s) | ||
1600 | { | ||
1601 | - return atomic_read__nocheck(&s->value); | ||
1602 | + return qatomic_read__nocheck(&s->value); | ||
1603 | } | ||
1604 | |||
1605 | static inline void stat64_add(Stat64 *s, uint64_t value) | ||
1606 | { | ||
1607 | - atomic_add(&s->value, value); | ||
1608 | + qatomic_add(&s->value, value); | ||
1609 | } | ||
1610 | |||
1611 | static inline void stat64_min(Stat64 *s, uint64_t value) | ||
1612 | { | ||
1613 | - uint64_t orig = atomic_read__nocheck(&s->value); | ||
1614 | + uint64_t orig = qatomic_read__nocheck(&s->value); | ||
1615 | while (orig > value) { | ||
1616 | - orig = atomic_cmpxchg__nocheck(&s->value, orig, value); | ||
1617 | + orig = qatomic_cmpxchg__nocheck(&s->value, orig, value); | ||
1618 | } | ||
1619 | } | ||
1620 | |||
1621 | static inline void stat64_max(Stat64 *s, uint64_t value) | ||
1622 | { | ||
1623 | - uint64_t orig = atomic_read__nocheck(&s->value); | ||
1624 | + uint64_t orig = qatomic_read__nocheck(&s->value); | ||
1625 | while (orig < value) { | ||
1626 | - orig = atomic_cmpxchg__nocheck(&s->value, orig, value); | ||
1627 | + orig = qatomic_cmpxchg__nocheck(&s->value, orig, value); | ||
1628 | } | ||
1629 | } | ||
1630 | #else | ||
1631 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_add(Stat64 *s, uint64_t value) | ||
1632 | low = (uint32_t) value; | ||
1633 | if (!low) { | ||
1634 | if (high) { | ||
1635 | - atomic_add(&s->high, high); | ||
1636 | + qatomic_add(&s->high, high); | ||
1637 | } | ||
1638 | return; | ||
1639 | } | ||
1640 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_add(Stat64 *s, uint64_t value) | ||
1641 | * the high 32 bits, so it can race just fine with stat64_add32_carry | ||
1642 | * and even stat64_get! | ||
1643 | */ | ||
1644 | - old = atomic_cmpxchg(&s->low, orig, result); | ||
1645 | + old = qatomic_cmpxchg(&s->low, orig, result); | ||
1646 | if (orig == old) { | ||
1647 | return; | ||
1648 | } | ||
1649 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_min(Stat64 *s, uint64_t value) | ||
1650 | high = value >> 32; | ||
1651 | low = (uint32_t) value; | ||
1652 | do { | ||
1653 | - orig_high = atomic_read(&s->high); | ||
1654 | + orig_high = qatomic_read(&s->high); | ||
1655 | if (orig_high < high) { | ||
1656 | return; | ||
1657 | } | ||
1658 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_min(Stat64 *s, uint64_t value) | ||
1659 | * the write barrier in stat64_min_slow. | ||
1660 | */ | ||
1661 | smp_rmb(); | ||
1662 | - orig_low = atomic_read(&s->low); | ||
1663 | + orig_low = qatomic_read(&s->low); | ||
1664 | if (orig_low <= low) { | ||
1665 | return; | ||
1666 | } | ||
1667 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_min(Stat64 *s, uint64_t value) | ||
1668 | * we may miss being lucky. | ||
1669 | */ | ||
1670 | smp_rmb(); | ||
1671 | - orig_high = atomic_read(&s->high); | ||
1672 | + orig_high = qatomic_read(&s->high); | ||
1673 | if (orig_high < high) { | ||
1674 | return; | ||
1675 | } | ||
1676 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_max(Stat64 *s, uint64_t value) | ||
1677 | high = value >> 32; | ||
1678 | low = (uint32_t) value; | ||
1679 | do { | ||
1680 | - orig_high = atomic_read(&s->high); | ||
1681 | + orig_high = qatomic_read(&s->high); | ||
1682 | if (orig_high > high) { | ||
1683 | return; | ||
1684 | } | ||
1685 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_max(Stat64 *s, uint64_t value) | ||
1686 | * the write barrier in stat64_max_slow. | ||
1687 | */ | ||
1688 | smp_rmb(); | ||
1689 | - orig_low = atomic_read(&s->low); | ||
1690 | + orig_low = qatomic_read(&s->low); | ||
1691 | if (orig_low >= low) { | ||
1692 | return; | ||
1693 | } | ||
1694 | @@ -XXX,XX +XXX,XX @@ static inline void stat64_max(Stat64 *s, uint64_t value) | ||
1695 | * we may miss being lucky. | ||
1696 | */ | ||
1697 | smp_rmb(); | ||
1698 | - orig_high = atomic_read(&s->high); | ||
1699 | + orig_high = qatomic_read(&s->high); | ||
1700 | if (orig_high > high) { | ||
1701 | return; | ||
1702 | } | ||
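A usage sketch for the Stat64 counters above (not part of the patch; the counter names are illustrative and stat64_init() is assumed to run at setup):

    static Stat64 bytes_transferred;
    static Stat64 max_latency_ns;

    static void account_request(uint64_t bytes, uint64_t latency_ns)
    {
        stat64_add(&bytes_transferred, bytes);    /* lock-free accumulation */
        stat64_max(&max_latency_ns, latency_ns);  /* cmpxchg loop shown above */
    }

    static uint64_t total_bytes(void)
    {
        return stat64_get(&bytes_transferred);
    }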
1703 | diff --git a/include/qemu/thread.h b/include/qemu/thread.h | ||
1704 | index XXXXXXX..XXXXXXX 100644 | ||
1705 | --- a/include/qemu/thread.h | ||
1706 | +++ b/include/qemu/thread.h | ||
1707 | @@ -XXX,XX +XXX,XX @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func; | ||
1708 | qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__) | ||
1709 | #else | ||
1710 | #define qemu_mutex_lock(m) ({ \ | ||
1711 | - QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \ | ||
1712 | + QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \ | ||
1713 | _f(m, __FILE__, __LINE__); \ | ||
1714 | }) | ||
1715 | |||
1716 | -#define qemu_mutex_trylock(m) ({ \ | ||
1717 | - QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \ | ||
1718 | - _f(m, __FILE__, __LINE__); \ | ||
1719 | +#define qemu_mutex_trylock(m) ({ \ | ||
1720 | + QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \ | ||
1721 | + _f(m, __FILE__, __LINE__); \ | ||
1722 | }) | ||
1723 | |||
1724 | -#define qemu_rec_mutex_lock(m) ({ \ | ||
1725 | - QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \ | ||
1726 | - _f(m, __FILE__, __LINE__); \ | ||
1727 | +#define qemu_rec_mutex_lock(m) ({ \ | ||
1728 | + QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\ | ||
1729 | + _f(m, __FILE__, __LINE__); \ | ||
1730 | }) | ||
1731 | |||
1732 | #define qemu_rec_mutex_trylock(m) ({ \ | ||
1733 | QemuRecMutexTrylockFunc _f; \ | ||
1734 | - _f = atomic_read(&qemu_rec_mutex_trylock_func); \ | ||
1735 | + _f = qatomic_read(&qemu_rec_mutex_trylock_func); \ | ||
1736 | _f(m, __FILE__, __LINE__); \ | ||
1737 | }) | ||
1738 | |||
1739 | #define qemu_cond_wait(c, m) ({ \ | ||
1740 | - QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func); \ | ||
1741 | + QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func); \ | ||
1742 | _f(c, m, __FILE__, __LINE__); \ | ||
1743 | }) | ||
1744 | |||
1745 | #define qemu_cond_timedwait(c, m, ms) ({ \ | ||
1746 | - QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \ | ||
1747 | + QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\ | ||
1748 | _f(c, m, ms, __FILE__, __LINE__); \ | ||
1749 | }) | ||
1750 | #endif | ||
1751 | @@ -XXX,XX +XXX,XX @@ static inline void qemu_spin_lock(QemuSpin *spin) | ||
1752 | __tsan_mutex_pre_lock(spin, 0); | ||
1753 | #endif | ||
1754 | while (unlikely(__sync_lock_test_and_set(&spin->value, true))) { | ||
1755 | - while (atomic_read(&spin->value)) { | ||
1756 | + while (qatomic_read(&spin->value)) { | ||
1757 | cpu_relax(); | ||
1758 | } | ||
1759 | } | ||
1760 | @@ -XXX,XX +XXX,XX @@ static inline bool qemu_spin_trylock(QemuSpin *spin) | ||
1761 | |||
1762 | static inline bool qemu_spin_locked(QemuSpin *spin) | ||
1763 | { | ||
1764 | - return atomic_read(&spin->value); | ||
1765 | + return qatomic_read(&spin->value); | ||
1766 | } | ||
1767 | |||
1768 | static inline void qemu_spin_unlock(QemuSpin *spin) | ||
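And a usage sketch for QemuSpin (not part of the patch; `counter` and its lock are illustrative, with qemu_spin_init() called at setup):

    static QemuSpin counter_lock;
    static uint64_t counter;

    static void counter_inc(void)
    {
        qemu_spin_lock(&counter_lock);   /* busy-waits via qatomic_read() */
        counter++;
        qemu_spin_unlock(&counter_lock);
    }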
1769 | diff --git a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h | ||
1770 | index XXXXXXX..XXXXXXX 100644 | ||
1771 | --- a/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h | ||
1772 | +++ b/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h | ||
1773 | @@ -XXX,XX +XXX,XX @@ static inline int pvrdma_idx_valid(uint32_t idx, uint32_t max_elems) | ||
1774 | |||
1775 | static inline int32_t pvrdma_idx(int *var, uint32_t max_elems) | ||
1776 | { | ||
1777 | - const unsigned int idx = atomic_read(var); | ||
1778 | + const unsigned int idx = qatomic_read(var); | ||
1779 | |||
1780 | if (pvrdma_idx_valid(idx, max_elems)) | ||
1781 | return idx & (max_elems - 1); | ||
1782 | @@ -XXX,XX +XXX,XX @@ static inline int32_t pvrdma_idx(int *var, uint32_t max_elems) | ||
1783 | |||
1784 | static inline void pvrdma_idx_ring_inc(int *var, uint32_t max_elems) | ||
1785 | { | ||
1786 | - uint32_t idx = atomic_read(var) + 1; /* Increment. */ | ||
1787 | + uint32_t idx = qatomic_read(var) + 1; /* Increment. */ | ||
1788 | |||
1789 | idx &= (max_elems << 1) - 1; /* Modulo size, flip gen. */ | ||
1790 | - atomic_set(var, idx); | ||
1791 | + qatomic_set(var, idx); | ||
1792 | } | ||
1793 | |||
1794 | static inline int32_t pvrdma_idx_ring_has_space(const struct pvrdma_ring *r, | ||
1795 | uint32_t max_elems, uint32_t *out_tail) | ||
1796 | { | ||
1797 | - const uint32_t tail = atomic_read(&r->prod_tail); | ||
1798 | - const uint32_t head = atomic_read(&r->cons_head); | ||
1799 | + const uint32_t tail = qatomic_read(&r->prod_tail); | ||
1800 | + const uint32_t head = qatomic_read(&r->cons_head); | ||
1801 | |||
1802 | if (pvrdma_idx_valid(tail, max_elems) && | ||
1803 | pvrdma_idx_valid(head, max_elems)) { | ||
1804 | @@ -XXX,XX +XXX,XX @@ static inline int32_t pvrdma_idx_ring_has_space(const struct pvrdma_ring *r, | ||
1805 | static inline int32_t pvrdma_idx_ring_has_data(const struct pvrdma_ring *r, | ||
1806 | uint32_t max_elems, uint32_t *out_head) | ||
1807 | { | ||
1808 | - const uint32_t tail = atomic_read(&r->prod_tail); | ||
1809 | - const uint32_t head = atomic_read(&r->cons_head); | ||
1810 | + const uint32_t tail = qatomic_read(&r->prod_tail); | ||
1811 | + const uint32_t head = qatomic_read(&r->cons_head); | ||
1812 | |||
1813 | if (pvrdma_idx_valid(tail, max_elems) && | ||
1814 | pvrdma_idx_valid(head, max_elems)) { | ||
1815 | diff --git a/linux-user/qemu.h b/linux-user/qemu.h | ||
1816 | index XXXXXXX..XXXXXXX 100644 | ||
1817 | --- a/linux-user/qemu.h | ||
1818 | +++ b/linux-user/qemu.h | ||
1819 | @@ -XXX,XX +XXX,XX @@ typedef struct TaskState { | ||
1820 | /* Nonzero if process_pending_signals() needs to do something (either | ||
1821 | * handle a pending signal or unblock signals). | ||
1822 | * This flag is written from a signal handler so should be accessed via | ||
1823 | - * the atomic_read() and atomic_set() functions. (It is not accessed | ||
1824 | + * the qatomic_read() and qatomic_set() functions. (It is not accessed | ||
1825 | * from multiple threads.) | ||
1826 | */ | ||
1827 | int signal_pending; | ||
1828 | diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h | ||
1829 | index XXXXXXX..XXXXXXX 100644 | ||
1830 | --- a/tcg/i386/tcg-target.h | ||
1831 | +++ b/tcg/i386/tcg-target.h | ||
1832 | @@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, | ||
1833 | uintptr_t jmp_addr, uintptr_t addr) | ||
1834 | { | ||
1835 | /* patch the branch destination */ | ||
1836 | - atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); | ||
1837 | + qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); | ||
1838 | /* no need to flush icache explicitly */ | ||
1839 | } | ||
1840 | |||
1841 | diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h | ||
1842 | index XXXXXXX..XXXXXXX 100644 | ||
1843 | --- a/tcg/s390/tcg-target.h | ||
1844 | +++ b/tcg/s390/tcg-target.h | ||
1845 | @@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, | ||
1846 | { | ||
1847 | /* patch the branch destination */ | ||
1848 | intptr_t disp = addr - (jmp_addr - 2); | ||
1849 | - atomic_set((int32_t *)jmp_addr, disp / 2); | ||
1850 | + qatomic_set((int32_t *)jmp_addr, disp / 2); | ||
1851 | /* no need to flush icache explicitly */ | ||
1852 | } | ||
1853 | |||
1854 | diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h | ||
1855 | index XXXXXXX..XXXXXXX 100644 | ||
1856 | --- a/tcg/tci/tcg-target.h | ||
1857 | +++ b/tcg/tci/tcg-target.h | ||
1858 | @@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, | ||
1859 | uintptr_t jmp_addr, uintptr_t addr) | ||
1860 | { | ||
1861 | /* patch the branch destination */ | ||
1862 | - atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); | ||
1863 | + qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4)); | ||
1864 | /* no need to flush icache explicitly */ | ||
1865 | } | ||
1866 | |||
1867 | diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c | ||
1868 | index XXXXXXX..XXXXXXX 100644 | ||
1869 | --- a/accel/kvm/kvm-all.c | ||
1870 | +++ b/accel/kvm/kvm-all.c | ||
1871 | @@ -XXX,XX +XXX,XX @@ static __thread bool have_sigbus_pending; | ||
1872 | |||
1873 | static void kvm_cpu_kick(CPUState *cpu) | ||
1874 | { | ||
1875 | - atomic_set(&cpu->kvm_run->immediate_exit, 1); | ||
1876 | + qatomic_set(&cpu->kvm_run->immediate_exit, 1); | ||
1877 | } | ||
1878 | |||
1879 | static void kvm_cpu_kick_self(void) | ||
1880 | @@ -XXX,XX +XXX,XX @@ static void kvm_eat_signals(CPUState *cpu) | ||
1881 | int r; | ||
1882 | |||
1883 | if (kvm_immediate_exit) { | ||
1884 | - atomic_set(&cpu->kvm_run->immediate_exit, 0); | ||
1885 | + qatomic_set(&cpu->kvm_run->immediate_exit, 0); | ||
1886 | /* Write kvm_run->immediate_exit before the cpu->exit_request | ||
1887 | * write in kvm_cpu_exec. | ||
1888 | */ | ||
1889 | @@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu) | ||
1890 | DPRINTF("kvm_cpu_exec()\n"); | ||
1891 | |||
1892 | if (kvm_arch_process_async_events(cpu)) { | ||
1893 | - atomic_set(&cpu->exit_request, 0); | ||
1894 | + qatomic_set(&cpu->exit_request, 0); | ||
1895 | return EXCP_HLT; | ||
1896 | } | ||
1897 | |||
1898 | @@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu) | ||
1899 | } | ||
1900 | |||
1901 | kvm_arch_pre_run(cpu, run); | ||
1902 | - if (atomic_read(&cpu->exit_request)) { | ||
1903 | + if (qatomic_read(&cpu->exit_request)) { | ||
1904 | DPRINTF("interrupt exit requested\n"); | ||
1905 | /* | ||
1906 | * KVM requires us to reenter the kernel after IO exits to complete | ||
1907 | @@ -XXX,XX +XXX,XX @@ int kvm_cpu_exec(CPUState *cpu) | ||
1908 | vm_stop(RUN_STATE_INTERNAL_ERROR); | ||
1909 | } | ||
1910 | |||
1911 | - atomic_set(&cpu->exit_request, 0); | ||
1912 | + qatomic_set(&cpu->exit_request, 0); | ||
1913 | return ret; | ||
1914 | } | ||
1915 | |||
1916 | @@ -XXX,XX +XXX,XX @@ int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) | ||
1917 | have_sigbus_pending = true; | ||
1918 | pending_sigbus_addr = addr; | ||
1919 | pending_sigbus_code = code; | ||
1920 | - atomic_set(&cpu->exit_request, 1); | ||
1921 | + qatomic_set(&cpu->exit_request, 1); | ||
1922 | return 0; | ||
1923 | #else | ||
1924 | return 1; | ||
1925 | diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c | ||
1926 | index XXXXXXX..XXXXXXX 100644 | ||
1927 | --- a/accel/tcg/cpu-exec.c | ||
1928 | +++ b/accel/tcg/cpu-exec.c | ||
1929 | @@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n, | ||
1930 | goto out_unlock_next; | ||
1931 | } | ||
1932 | /* Atomically claim the jump destination slot only if it was NULL */ | ||
1933 | - old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next); | ||
1934 | + old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, | ||
1935 | + (uintptr_t)tb_next); | ||
1936 | if (old) { | ||
1937 | goto out_unlock_next; | ||
1938 | } | ||
1939 | @@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_find(CPUState *cpu, | ||
1940 | tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); | ||
1941 | mmap_unlock(); | ||
1942 | /* We add the TB in the virtual pc hash table for the fast lookup */ | ||
1943 | - atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); | ||
1944 | + qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); | ||
1945 | } | ||
1946 | #ifndef CONFIG_USER_ONLY | ||
1947 | /* We don't take care of direct jumps when address mapping changes in | ||
1948 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu, | ||
1949 | * Ensure zeroing happens before reading cpu->exit_request or | ||
1950 | * cpu->interrupt_request (see also smp_wmb in cpu_exit()) | ||
1951 | */ | ||
1952 | - atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); | ||
1953 | + qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); | ||
1954 | |||
1955 | - if (unlikely(atomic_read(&cpu->interrupt_request))) { | ||
1956 | + if (unlikely(qatomic_read(&cpu->interrupt_request))) { | ||
1957 | int interrupt_request; | ||
1958 | qemu_mutex_lock_iothread(); | ||
1959 | interrupt_request = cpu->interrupt_request; | ||
1960 | @@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu, | ||
1961 | } | ||
1962 | |||
1963 | /* Finally, check if we need to exit to the main loop. */ | ||
1964 | - if (unlikely(atomic_read(&cpu->exit_request)) | ||
1965 | + if (unlikely(qatomic_read(&cpu->exit_request)) | ||
1966 | || (use_icount | ||
1967 | && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) { | ||
1968 | - atomic_set(&cpu->exit_request, 0); | ||
1969 | + qatomic_set(&cpu->exit_request, 0); | ||
1970 | if (cpu->exception_index == -1) { | ||
1971 | cpu->exception_index = EXCP_INTERRUPT; | ||
1972 | } | ||
1973 | @@ -XXX,XX +XXX,XX @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, | ||
1974 | } | ||
1975 | |||
1976 | *last_tb = NULL; | ||
1977 | - insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32); | ||
1978 | + insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32); | ||
1979 | if (insns_left < 0) { | ||
1980 | /* Something asked us to stop executing chained TBs; just | ||
1981 | * continue round the main loop. Whatever requested the exit | ||
1982 | diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c | ||
1983 | index XXXXXXX..XXXXXXX 100644 | ||
1984 | --- a/accel/tcg/cputlb.c | ||
1985 | +++ b/accel/tcg/cputlb.c | ||
1986 | @@ -XXX,XX +XXX,XX @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) | ||
1987 | CPU_FOREACH(cpu) { | ||
1988 | CPUArchState *env = cpu->env_ptr; | ||
1989 | |||
1990 | - full += atomic_read(&env_tlb(env)->c.full_flush_count); | ||
1991 | - part += atomic_read(&env_tlb(env)->c.part_flush_count); | ||
1992 | - elide += atomic_read(&env_tlb(env)->c.elide_flush_count); | ||
1993 | + full += qatomic_read(&env_tlb(env)->c.full_flush_count); | ||
1994 | + part += qatomic_read(&env_tlb(env)->c.part_flush_count); | ||
1995 | + elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); | ||
1996 | } | ||
1997 | *pfull = full; | ||
1998 | *ppart = part; | ||
1999 | @@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) | ||
2000 | cpu_tb_jmp_cache_clear(cpu); | ||
2001 | |||
2002 | if (to_clean == ALL_MMUIDX_BITS) { | ||
2003 | - atomic_set(&env_tlb(env)->c.full_flush_count, | ||
2004 | + qatomic_set(&env_tlb(env)->c.full_flush_count, | ||
2005 | env_tlb(env)->c.full_flush_count + 1); | ||
2006 | } else { | ||
2007 | - atomic_set(&env_tlb(env)->c.part_flush_count, | ||
2008 | + qatomic_set(&env_tlb(env)->c.part_flush_count, | ||
2009 | env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); | ||
2010 | if (to_clean != asked) { | ||
2011 | - atomic_set(&env_tlb(env)->c.elide_flush_count, | ||
2012 | + qatomic_set(&env_tlb(env)->c.elide_flush_count, | ||
2013 | env_tlb(env)->c.elide_flush_count + | ||
2014 | ctpop16(asked & ~to_clean)); | ||
2015 | } | ||
2016 | @@ -XXX,XX +XXX,XX @@ void tlb_unprotect_code(ram_addr_t ram_addr) | ||
2017 | * generated code. | ||
2018 | * | ||
2019 | * Other vCPUs might be reading their TLBs during guest execution, so we update | ||
2020 | - * te->addr_write with atomic_set. We don't need to worry about this for | ||
2021 | + * te->addr_write with qatomic_set. We don't need to worry about this for | ||
2022 | * oversized guests as MTTCG is disabled for them. | ||
2023 | * | ||
2024 | * Called with tlb_c.lock held. | ||
2025 | @@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, | ||
2026 | #if TCG_OVERSIZED_GUEST | ||
2027 | tlb_entry->addr_write |= TLB_NOTDIRTY; | ||
2028 | #else | ||
2029 | - atomic_set(&tlb_entry->addr_write, | ||
2030 | + qatomic_set(&tlb_entry->addr_write, | ||
2031 | tlb_entry->addr_write | TLB_NOTDIRTY); | ||
2032 | #endif | ||
2033 | } | ||
2034 | @@ -XXX,XX +XXX,XX @@ static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) | ||
2035 | #if TCG_OVERSIZED_GUEST | ||
2036 | return *(target_ulong *)((uintptr_t)entry + ofs); | ||
2037 | #else | ||
2038 | - /* ofs might correspond to .addr_write, so use atomic_read */ | ||
2039 | - return atomic_read((target_ulong *)((uintptr_t)entry + ofs)); | ||
2040 | + /* ofs might correspond to .addr_write, so use qatomic_read */ | ||
2041 | + return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); | ||
2042 | #endif | ||
2043 | } | ||
2044 | |||
2045 | @@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, | ||
2046 | CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; | ||
2047 | target_ulong cmp; | ||
2048 | |||
2049 | - /* elt_ofs might correspond to .addr_write, so use atomic_read */ | ||
2050 | + /* elt_ofs might correspond to .addr_write, so use qatomic_read */ | ||
2051 | #if TCG_OVERSIZED_GUEST | ||
2052 | cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); | ||
2053 | #else | ||
2054 | - cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); | ||
2055 | + cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); | ||
2056 | #endif | ||
2057 | |||
2058 | if (cmp == page) { | ||
2059 | diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c | ||
2060 | index XXXXXXX..XXXXXXX 100644 | ||
2061 | --- a/accel/tcg/tcg-all.c | ||
2062 | +++ b/accel/tcg/tcg-all.c | ||
2063 | @@ -XXX,XX +XXX,XX @@ static void tcg_handle_interrupt(CPUState *cpu, int mask) | ||
2064 | if (!qemu_cpu_is_self(cpu)) { | ||
2065 | qemu_cpu_kick(cpu); | ||
2066 | } else { | ||
2067 | - atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); | ||
2068 | + qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); | ||
2069 | if (use_icount && | ||
2070 | !cpu->can_do_io | ||
2071 | && (mask & ~old_mask) != 0) { | ||
2072 | diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c | ||
2073 | index XXXXXXX..XXXXXXX 100644 | ||
2074 | --- a/accel/tcg/translate-all.c | ||
2075 | +++ b/accel/tcg/translate-all.c | ||
2076 | @@ -XXX,XX +XXX,XX @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, | ||
2077 | restore_state_to_opc(env, tb, data); | ||
2078 | |||
2079 | #ifdef CONFIG_PROFILER | ||
2080 | - atomic_set(&prof->restore_time, | ||
2081 | + qatomic_set(&prof->restore_time, | ||
2082 | prof->restore_time + profile_getclock() - ti); | ||
2083 | - atomic_set(&prof->restore_count, prof->restore_count + 1); | ||
2084 | + qatomic_set(&prof->restore_count, prof->restore_count + 1); | ||
2085 | #endif | ||
2086 | return 0; | ||
2087 | } | ||
2088 | @@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) | ||
2089 | |||
2090 | /* Level 2..N-1. */ | ||
2091 | for (i = v_l2_levels; i > 0; i--) { | ||
2092 | - void **p = atomic_rcu_read(lp); | ||
2093 | + void **p = qatomic_rcu_read(lp); | ||
2094 | |||
2095 | if (p == NULL) { | ||
2096 | void *existing; | ||
2097 | @@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) | ||
2098 | return NULL; | ||
2099 | } | ||
2100 | p = g_new0(void *, V_L2_SIZE); | ||
2101 | - existing = atomic_cmpxchg(lp, NULL, p); | ||
2102 | + existing = qatomic_cmpxchg(lp, NULL, p); | ||
2103 | if (unlikely(existing)) { | ||
2104 | g_free(p); | ||
2105 | p = existing; | ||
2106 | @@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) | ||
2107 | lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1)); | ||
2108 | } | ||
2109 | |||
2110 | - pd = atomic_rcu_read(lp); | ||
2111 | + pd = qatomic_rcu_read(lp); | ||
2112 | if (pd == NULL) { | ||
2113 | void *existing; | ||
2114 | |||
2115 | @@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) | ||
2116 | } | ||
2117 | } | ||
2118 | #endif | ||
2119 | - existing = atomic_cmpxchg(lp, NULL, pd); | ||
2120 | + existing = qatomic_cmpxchg(lp, NULL, pd); | ||
2121 | if (unlikely(existing)) { | ||
2122 | #ifndef CONFIG_USER_ONLY | ||
2123 | { | ||
2124 | @@ -XXX,XX +XXX,XX @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) | ||
2125 | tcg_region_reset_all(); | ||
2126 | /* XXX: flush processor icache at this point if cache flush is | ||
2127 | expensive */ | ||
2128 | - atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); | ||
2129 | + qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); | ||
2130 | |||
2131 | done: | ||
2132 | mmap_unlock(); | ||
2133 | @@ -XXX,XX +XXX,XX @@ done: | ||
2134 | void tb_flush(CPUState *cpu) | ||
2135 | { | ||
2136 | if (tcg_enabled()) { | ||
2137 | - unsigned tb_flush_count = atomic_mb_read(&tb_ctx.tb_flush_count); | ||
2138 | + unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count); | ||
2139 | |||
2140 | if (cpu_in_exclusive_context(cpu)) { | ||
2141 | do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); | ||
2142 | @@ -XXX,XX +XXX,XX @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) | ||
2143 | int n; | ||
2144 | |||
2145 | /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */ | ||
2146 | - ptr = atomic_or_fetch(&orig->jmp_dest[n_orig], 1); | ||
2147 | + ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1); | ||
2148 | dest = (TranslationBlock *)(ptr & ~1); | ||
2149 | if (dest == NULL) { | ||
2150 | return; | ||
2151 | @@ -XXX,XX +XXX,XX @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) | ||
2152 | * While acquiring the lock, the jump might have been removed if the | ||
2153 | * destination TB was invalidated; check again. | ||
2154 | */ | ||
2155 | - ptr_locked = atomic_read(&orig->jmp_dest[n_orig]); | ||
2156 | + ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]); | ||
2157 | if (ptr_locked != ptr) { | ||
2158 | qemu_spin_unlock(&dest->jmp_lock); | ||
2159 | /* | ||
2160 | @@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest) | ||
2161 | |||
2162 | TB_FOR_EACH_JMP(dest, tb, n) { | ||
2163 | tb_reset_jump(tb, n); | ||
2164 | - atomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); | ||
2165 | + qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1); | ||
2166 | /* No need to clear the list entry; setting the dest ptr is enough */ | ||
2167 | } | ||
2168 | dest->jmp_list_head = (uintptr_t)NULL; | ||
2169 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
2170 | |||
2171 | /* make sure no further incoming jumps will be chained to this TB */ | ||
2172 | qemu_spin_lock(&tb->jmp_lock); | ||
2173 | - atomic_set(&tb->cflags, tb->cflags | CF_INVALID); | ||
2174 | + qatomic_set(&tb->cflags, tb->cflags | CF_INVALID); | ||
2175 | qemu_spin_unlock(&tb->jmp_lock); | ||
2176 | |||
2177 | /* remove the TB from the hash list */ | ||
2178 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
2179 | /* remove the TB from the hash list */ | ||
2180 | h = tb_jmp_cache_hash_func(tb->pc); | ||
2181 | CPU_FOREACH(cpu) { | ||
2182 | - if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { | ||
2183 | - atomic_set(&cpu->tb_jmp_cache[h], NULL); | ||
2184 | + if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) { | ||
2185 | + qatomic_set(&cpu->tb_jmp_cache[h], NULL); | ||
2186 | } | ||
2187 | } | ||
2188 | |||
2189 | @@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list) | ||
2190 | /* suppress any remaining jumps to this TB */ | ||
2191 | tb_jmp_unlink(tb); | ||
2192 | |||
2193 | - atomic_set(&tcg_ctx->tb_phys_invalidate_count, | ||
2194 | + qatomic_set(&tcg_ctx->tb_phys_invalidate_count, | ||
2195 | tcg_ctx->tb_phys_invalidate_count + 1); | ||
2196 | } | ||
2197 | |||
2198 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
2199 | |||
2200 | #ifdef CONFIG_PROFILER | ||
2201 | /* includes aborted translations because of exceptions */ | ||
2202 | - atomic_set(&prof->tb_count1, prof->tb_count1 + 1); | ||
2203 | + qatomic_set(&prof->tb_count1, prof->tb_count1 + 1); | ||
2204 | ti = profile_getclock(); | ||
2205 | #endif | ||
2206 | |||
2207 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
2208 | } | ||
2209 | |||
2210 | #ifdef CONFIG_PROFILER | ||
2211 | - atomic_set(&prof->tb_count, prof->tb_count + 1); | ||
2212 | - atomic_set(&prof->interm_time, prof->interm_time + profile_getclock() - ti); | ||
2213 | + qatomic_set(&prof->tb_count, prof->tb_count + 1); | ||
2214 | + qatomic_set(&prof->interm_time, | ||
2215 | + prof->interm_time + profile_getclock() - ti); | ||
2216 | ti = profile_getclock(); | ||
2217 | #endif | ||
2218 | |||
2219 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
2220 | tb->tc.size = gen_code_size; | ||
2221 | |||
2222 | #ifdef CONFIG_PROFILER | ||
2223 | - atomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); | ||
2224 | - atomic_set(&prof->code_in_len, prof->code_in_len + tb->size); | ||
2225 | - atomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); | ||
2226 | - atomic_set(&prof->search_out_len, prof->search_out_len + search_size); | ||
2227 | + qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti); | ||
2228 | + qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size); | ||
2229 | + qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size); | ||
2230 | + qatomic_set(&prof->search_out_len, prof->search_out_len + search_size); | ||
2231 | #endif | ||
2232 | |||
2233 | #ifdef DEBUG_DISAS | ||
2234 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
2235 | } | ||
2236 | #endif | ||
2237 | |||
2238 | - atomic_set(&tcg_ctx->code_gen_ptr, (void *) | ||
2239 | + qatomic_set(&tcg_ctx->code_gen_ptr, (void *) | ||
2240 | ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, | ||
2241 | CODE_GEN_ALIGN)); | ||
2242 | |||
2243 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, | ||
2244 | uintptr_t orig_aligned = (uintptr_t)gen_code_buf; | ||
2245 | |||
2246 | orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); | ||
2247 | - atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); | ||
2248 | + qatomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); | ||
2249 | tb_destroy(tb); | ||
2250 | return existing_tb; | ||
2251 | } | ||
2252 | @@ -XXX,XX +XXX,XX @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr) | ||
2253 | unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr); | ||
2254 | |||
2255 | for (i = 0; i < TB_JMP_PAGE_SIZE; i++) { | ||
2256 | - atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); | ||
2257 | + qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL); | ||
2258 | } | ||
2259 | } | ||
2260 | |||
2261 | @@ -XXX,XX +XXX,XX @@ void dump_exec_info(void) | ||
2262 | |||
2263 | qemu_printf("\nStatistics:\n"); | ||
2264 | qemu_printf("TB flush count %u\n", | ||
2265 | - atomic_read(&tb_ctx.tb_flush_count)); | ||
2266 | + qatomic_read(&tb_ctx.tb_flush_count)); | ||
2267 | qemu_printf("TB invalidate count %zu\n", | ||
2268 | tcg_tb_phys_invalidate_count()); | ||
2269 | |||
2270 | @@ -XXX,XX +XXX,XX @@ void cpu_interrupt(CPUState *cpu, int mask) | ||
2271 | { | ||
2272 | g_assert(qemu_mutex_iothread_locked()); | ||
2273 | cpu->interrupt_request |= mask; | ||
2274 | - atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); | ||
2275 | + qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); | ||
2276 | } | ||
2277 | |||
2278 | /* | ||
2279 | diff --git a/audio/jackaudio.c b/audio/jackaudio.c | ||
2280 | index XXXXXXX..XXXXXXX 100644 | ||
2281 | --- a/audio/jackaudio.c | ||
2282 | +++ b/audio/jackaudio.c | ||
2283 | @@ -XXX,XX +XXX,XX @@ static void qjack_buffer_create(QJackBuffer *buffer, int channels, int frames) | ||
2284 | static void qjack_buffer_clear(QJackBuffer *buffer) | ||
2285 | { | ||
2286 | assert(buffer->data); | ||
2287 | - atomic_store_release(&buffer->used, 0); | ||
2288 | + qatomic_store_release(&buffer->used, 0); | ||
2289 | buffer->rptr = 0; | ||
2290 | buffer->wptr = 0; | ||
2291 | } | ||
2292 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_write(QJackBuffer *buffer, float *data, int size) | ||
2293 | assert(buffer->data); | ||
2294 | const int samples = size / sizeof(float); | ||
2295 | int frames = samples / buffer->channels; | ||
2296 | - const int avail = buffer->frames - atomic_load_acquire(&buffer->used); | ||
2297 | + const int avail = buffer->frames - qatomic_load_acquire(&buffer->used); | ||
2298 | |||
2299 | if (frames > avail) { | ||
2300 | frames = avail; | ||
2301 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_write(QJackBuffer *buffer, float *data, int size) | ||
2302 | |||
2303 | buffer->wptr = wptr; | ||
2304 | |||
2305 | - atomic_add(&buffer->used, frames); | ||
2306 | + qatomic_add(&buffer->used, frames); | ||
2307 | return frames * buffer->channels * sizeof(float); | ||
2308 | }; | ||
2309 | |||
2310 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_write(QJackBuffer *buffer, float *data, int size) | ||
2311 | static int qjack_buffer_write_l(QJackBuffer *buffer, float **dest, int frames) | ||
2312 | { | ||
2313 | assert(buffer->data); | ||
2314 | - const int avail = buffer->frames - atomic_load_acquire(&buffer->used); | ||
2315 | + const int avail = buffer->frames - qatomic_load_acquire(&buffer->used); | ||
2316 | int wptr = buffer->wptr; | ||
2317 | |||
2318 | if (frames > avail) { | ||
2319 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_write_l(QJackBuffer *buffer, float **dest, int frames) | ||
2320 | } | ||
2321 | buffer->wptr = wptr; | ||
2322 | |||
2323 | - atomic_add(&buffer->used, frames); | ||
2324 | + qatomic_add(&buffer->used, frames); | ||
2325 | return frames; | ||
2326 | } | ||
2327 | |||
2328 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_read(QJackBuffer *buffer, float *dest, int size) | ||
2329 | assert(buffer->data); | ||
2330 | const int samples = size / sizeof(float); | ||
2331 | int frames = samples / buffer->channels; | ||
2332 | - const int avail = atomic_load_acquire(&buffer->used); | ||
2333 | + const int avail = qatomic_load_acquire(&buffer->used); | ||
2334 | |||
2335 | if (frames > avail) { | ||
2336 | frames = avail; | ||
2337 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_read(QJackBuffer *buffer, float *dest, int size) | ||
2338 | |||
2339 | buffer->rptr = rptr; | ||
2340 | |||
2341 | - atomic_sub(&buffer->used, frames); | ||
2342 | + qatomic_sub(&buffer->used, frames); | ||
2343 | return frames * buffer->channels * sizeof(float); | ||
2344 | } | ||
2345 | |||
2346 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_read_l(QJackBuffer *buffer, float **dest, int frames) | ||
2347 | { | ||
2348 | assert(buffer->data); | ||
2349 | int copy = frames; | ||
2350 | - const int used = atomic_load_acquire(&buffer->used); | ||
2351 | + const int used = qatomic_load_acquire(&buffer->used); | ||
2352 | int rptr = buffer->rptr; | ||
2353 | |||
2354 | if (copy > used) { | ||
2355 | @@ -XXX,XX +XXX,XX @@ static int qjack_buffer_read_l(QJackBuffer *buffer, float **dest, int frames) | ||
2356 | } | ||
2357 | buffer->rptr = rptr; | ||
2358 | |||
2359 | - atomic_sub(&buffer->used, copy); | ||
2360 | + qatomic_sub(&buffer->used, copy); | ||
2361 | return copy; | ||
2362 | } | ||
2363 | |||
2364 | diff --git a/block.c b/block.c | ||
2365 | index XXXXXXX..XXXXXXX 100644 | ||
2366 | --- a/block.c | ||
2367 | +++ b/block.c | ||
2368 | @@ -XXX,XX +XXX,XX @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file, | ||
2369 | } | ||
2370 | |||
2371 | /* bdrv_new() and bdrv_close() make it so */ | ||
2372 | - assert(atomic_read(&bs->copy_on_read) == 0); | ||
2373 | + assert(qatomic_read(&bs->copy_on_read) == 0); | ||
2374 | |||
2375 | if (bs->open_flags & BDRV_O_COPY_ON_READ) { | ||
2376 | if (!bs->read_only) { | ||
2377 | @@ -XXX,XX +XXX,XX @@ static void bdrv_close(BlockDriverState *bs) | ||
2378 | bs->file = NULL; | ||
2379 | g_free(bs->opaque); | ||
2380 | bs->opaque = NULL; | ||
2381 | - atomic_set(&bs->copy_on_read, 0); | ||
2382 | + qatomic_set(&bs->copy_on_read, 0); | ||
2383 | bs->backing_file[0] = '\0'; | ||
2384 | bs->backing_format[0] = '\0'; | ||
2385 | bs->total_sectors = 0; | ||
2386 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
2387 | index XXXXXXX..XXXXXXX 100644 | ||
2388 | --- a/block/block-backend.c | ||
2389 | +++ b/block/block-backend.c | ||
2390 | @@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) | ||
2391 | |||
2392 | void blk_inc_in_flight(BlockBackend *blk) | ||
2393 | { | ||
2394 | - atomic_inc(&blk->in_flight); | ||
2395 | + qatomic_inc(&blk->in_flight); | ||
2396 | } | ||
2397 | |||
2398 | void blk_dec_in_flight(BlockBackend *blk) | ||
2399 | { | ||
2400 | - atomic_dec(&blk->in_flight); | ||
2401 | + qatomic_dec(&blk->in_flight); | ||
2402 | aio_wait_kick(); | ||
2403 | } | ||
2404 | |||
2405 | @@ -XXX,XX +XXX,XX @@ void blk_drain(BlockBackend *blk) | ||
2406 | |||
2407 | /* We may have -ENOMEDIUM completions in flight */ | ||
2408 | AIO_WAIT_WHILE(blk_get_aio_context(blk), | ||
2409 | - atomic_mb_read(&blk->in_flight) > 0); | ||
2410 | + qatomic_mb_read(&blk->in_flight) > 0); | ||
2411 | |||
2412 | if (bs) { | ||
2413 | bdrv_drained_end(bs); | ||
2414 | @@ -XXX,XX +XXX,XX @@ void blk_drain_all(void) | ||
2415 | aio_context_acquire(ctx); | ||
2416 | |||
2417 | /* We may have -ENOMEDIUM completions in flight */ | ||
2418 | - AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0); | ||
2419 | + AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0); | ||
2420 | |||
2421 | aio_context_release(ctx); | ||
2422 | } | ||
2423 | @@ -XXX,XX +XXX,XX @@ void blk_io_limits_update_group(BlockBackend *blk, const char *group) | ||
2424 | static void blk_root_drained_begin(BdrvChild *child) | ||
2425 | { | ||
2426 | BlockBackend *blk = child->opaque; | ||
2427 | + ThrottleGroupMember *tgm = &blk->public.throttle_group_member; | ||
2428 | |||
2429 | if (++blk->quiesce_counter == 1) { | ||
2430 | if (blk->dev_ops && blk->dev_ops->drained_begin) { | ||
2431 | @@ -XXX,XX +XXX,XX @@ static void blk_root_drained_begin(BdrvChild *child) | ||
2432 | /* Note that blk->root may not be accessible here yet if we are just | ||
2433 | * attaching to a BlockDriverState that is drained. Use child instead. */ | ||
2434 | |||
2435 | - if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) { | ||
2436 | - throttle_group_restart_tgm(&blk->public.throttle_group_member); | ||
2437 | + if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { | ||
2438 | + throttle_group_restart_tgm(tgm); | ||
2439 | } | ||
2440 | } | ||
2441 | |||
2442 | @@ -XXX,XX +XXX,XX @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter) | ||
2443 | assert(blk->quiesce_counter); | ||
2444 | |||
2445 | assert(blk->public.throttle_group_member.io_limits_disabled); | ||
2446 | - atomic_dec(&blk->public.throttle_group_member.io_limits_disabled); | ||
2447 | + qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled); | ||
2448 | |||
2449 | if (--blk->quiesce_counter == 0) { | ||
2450 | if (blk->dev_ops && blk->dev_ops->drained_end) { | ||
17 | diff --git a/block/io.c b/block/io.c | 2451 | diff --git a/block/io.c b/block/io.c |
18 | index XXXXXXX..XXXXXXX 100644 | 2452 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/block/io.c | 2453 | --- a/block/io.c |
20 | +++ b/block/io.c | 2454 | +++ b/block/io.c |
21 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, | 2455 | @@ -XXX,XX +XXX,XX @@ void bdrv_parent_drained_end_single(BdrvChild *c) |
22 | * potential fallback support, if we ever implement any read flags | 2456 | { |
23 | * to pass through to drivers. For now, there aren't any | 2457 | int drained_end_counter = 0; |
24 | * passthrough flags. */ | 2458 | bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); |
25 | - assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ | | 2459 | - BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0); |
26 | - BDRV_REQ_PREFETCH))); | 2460 | + BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0); |
27 | + assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH))); | 2461 | } |
28 | 2462 | ||
29 | /* Handle Copy on Read and associated serialisation */ | 2463 | static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, |
30 | if (flags & BDRV_REQ_COPY_ON_READ) { | 2464 | @@ -XXX,XX +XXX,XX @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) |
31 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child, | 2465 | */ |
32 | bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs)); | 2466 | void bdrv_enable_copy_on_read(BlockDriverState *bs) |
33 | } | 2467 | { |
34 | 2468 | - atomic_inc(&bs->copy_on_read); | |
35 | - /* BDRV_REQ_SERIALISING is only for write operation */ | 2469 | + qatomic_inc(&bs->copy_on_read); |
36 | - assert(!(flags & BDRV_REQ_SERIALISING)); | 2470 | } |
37 | - | 2471 | |
38 | - if (!(flags & BDRV_REQ_NO_SERIALISING)) { | 2472 | void bdrv_disable_copy_on_read(BlockDriverState *bs) |
39 | - bdrv_wait_serialising_requests(req); | 2473 | { |
40 | - } | 2474 | - int old = atomic_fetch_dec(&bs->copy_on_read); |
41 | + bdrv_wait_serialising_requests(req); | 2475 | + int old = qatomic_fetch_dec(&bs->copy_on_read); |
42 | 2476 | assert(old >= 1); | |
43 | if (flags & BDRV_REQ_COPY_ON_READ) { | 2477 | } |
44 | int64_t pnum; | 2478 | |
2479 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) | ||
2480 | } | ||
2481 | |||
2482 | /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */ | ||
2483 | - atomic_mb_set(&data->done, true); | ||
2484 | + qatomic_mb_set(&data->done, true); | ||
2485 | if (!data->begin) { | ||
2486 | - atomic_dec(data->drained_end_counter); | ||
2487 | + qatomic_dec(data->drained_end_counter); | ||
2488 | } | ||
2489 | bdrv_dec_in_flight(bs); | ||
2490 | |||
2491 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, | ||
2492 | }; | ||
2493 | |||
2494 | if (!begin) { | ||
2495 | - atomic_inc(drained_end_counter); | ||
2496 | + qatomic_inc(drained_end_counter); | ||
2497 | } | ||
2498 | |||
2499 | /* Make sure the driver callback completes during the polling phase for | ||
2500 | @@ -XXX,XX +XXX,XX @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, | ||
2501 | return true; | ||
2502 | } | ||
2503 | |||
2504 | - if (atomic_read(&bs->in_flight)) { | ||
2505 | + if (qatomic_read(&bs->in_flight)) { | ||
2506 | return true; | ||
2507 | } | ||
2508 | |||
2509 | @@ -XXX,XX +XXX,XX @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, | ||
2510 | assert(!qemu_in_coroutine()); | ||
2511 | |||
2512 | /* Stop things in parent-to-child order */ | ||
2513 | - if (atomic_fetch_inc(&bs->quiesce_counter) == 0) { | ||
2514 | + if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { | ||
2515 | aio_disable_external(bdrv_get_aio_context(bs)); | ||
2516 | } | ||
2517 | |||
2518 | @@ -XXX,XX +XXX,XX @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, | ||
2519 | bdrv_parent_drained_end(bs, parent, ignore_bds_parents, | ||
2520 | drained_end_counter); | ||
2521 | |||
2522 | - old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter); | ||
2523 | + old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); | ||
2524 | if (old_quiesce_counter == 1) { | ||
2525 | aio_enable_external(bdrv_get_aio_context(bs)); | ||
2526 | } | ||
2527 | @@ -XXX,XX +XXX,XX @@ void bdrv_drained_end(BlockDriverState *bs) | ||
2528 | { | ||
2529 | int drained_end_counter = 0; | ||
2530 | bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter); | ||
2531 | - BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0); | ||
2532 | + BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); | ||
2533 | } | ||
2534 | |||
2535 | void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter) | ||
2536 | @@ -XXX,XX +XXX,XX @@ void bdrv_subtree_drained_end(BlockDriverState *bs) | ||
2537 | { | ||
2538 | int drained_end_counter = 0; | ||
2539 | bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter); | ||
2540 | - BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0); | ||
2541 | + BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); | ||
2542 | } | ||
2543 | |||
2544 | void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) | ||
2545 | @@ -XXX,XX +XXX,XX @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) | ||
2546 | &drained_end_counter); | ||
2547 | } | ||
2548 | |||
2549 | - BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0); | ||
2550 | + BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0); | ||
2551 | } | ||
2552 | |||
2553 | /* | ||
2554 | @@ -XXX,XX +XXX,XX @@ static void bdrv_drain_assert_idle(BlockDriverState *bs) | ||
2555 | { | ||
2556 | BdrvChild *child, *next; | ||
2557 | |||
2558 | - assert(atomic_read(&bs->in_flight) == 0); | ||
2559 | + assert(qatomic_read(&bs->in_flight) == 0); | ||
2560 | QLIST_FOREACH_SAFE(child, &bs->children, next, next) { | ||
2561 | bdrv_drain_assert_idle(child->bs); | ||
2562 | } | ||
2563 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void) | ||
2564 | } | ||
2565 | |||
2566 | assert(qemu_get_current_aio_context() == qemu_get_aio_context()); | ||
2567 | - AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0); | ||
2568 | + AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0); | ||
2569 | |||
2570 | assert(bdrv_drain_all_count > 0); | ||
2571 | bdrv_drain_all_count--; | ||
2572 | @@ -XXX,XX +XXX,XX @@ void bdrv_drain_all(void) | ||
2573 | static void tracked_request_end(BdrvTrackedRequest *req) | ||
2574 | { | ||
2575 | if (req->serialising) { | ||
2576 | - atomic_dec(&req->bs->serialising_in_flight); | ||
2577 | + qatomic_dec(&req->bs->serialising_in_flight); | ||
2578 | } | ||
2579 | |||
2580 | qemu_co_mutex_lock(&req->bs->reqs_lock); | ||
2581 | @@ -XXX,XX +XXX,XX @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) | ||
2582 | |||
2583 | qemu_co_mutex_lock(&bs->reqs_lock); | ||
2584 | if (!req->serialising) { | ||
2585 | - atomic_inc(&req->bs->serialising_in_flight); | ||
2586 | + qatomic_inc(&req->bs->serialising_in_flight); | ||
2587 | req->serialising = true; | ||
2588 | } | ||
2589 | |||
2590 | @@ -XXX,XX +XXX,XX @@ static int bdrv_get_cluster_size(BlockDriverState *bs) | ||
2591 | |||
2592 | void bdrv_inc_in_flight(BlockDriverState *bs) | ||
2593 | { | ||
2594 | - atomic_inc(&bs->in_flight); | ||
2595 | + qatomic_inc(&bs->in_flight); | ||
2596 | } | ||
2597 | |||
2598 | void bdrv_wakeup(BlockDriverState *bs) | ||
2599 | @@ -XXX,XX +XXX,XX @@ void bdrv_wakeup(BlockDriverState *bs) | ||
2600 | |||
2601 | void bdrv_dec_in_flight(BlockDriverState *bs) | ||
2602 | { | ||
2603 | - atomic_dec(&bs->in_flight); | ||
2604 | + qatomic_dec(&bs->in_flight); | ||
2605 | bdrv_wakeup(bs); | ||
2606 | } | ||
2607 | |||
2608 | @@ -XXX,XX +XXX,XX @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self | ||
2609 | BlockDriverState *bs = self->bs; | ||
2610 | bool waited = false; | ||
2611 | |||
2612 | - if (!atomic_read(&bs->serialising_in_flight)) { | ||
2613 | + if (!qatomic_read(&bs->serialising_in_flight)) { | ||
2614 | return false; | ||
2615 | } | ||
2616 | |||
45 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, | 2617 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, |
46 | bdrv_inc_in_flight(bs); | 2618 | bdrv_inc_in_flight(bs); |
47 | 2619 | ||
48 | /* Don't do copy-on-read if we read data before write operation */ | 2620 | /* Don't do copy-on-read if we read data before write operation */ |
49 | - if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) { | 2621 | - if (atomic_read(&bs->copy_on_read)) { |
50 | + if (atomic_read(&bs->copy_on_read)) { | 2622 | + if (qatomic_read(&bs->copy_on_read)) { |
51 | flags |= BDRV_REQ_COPY_ON_READ; | 2623 | flags |= BDRV_REQ_COPY_ON_READ; |
52 | } | 2624 | } |
53 | 2625 | ||
54 | @@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes, | 2626 | @@ -XXX,XX +XXX,XX @@ bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes, |
55 | return -EPERM; | 2627 | int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); |
56 | } | 2628 | BlockDriverState *bs = child->bs; |
57 | 2629 | ||
58 | - /* BDRV_REQ_NO_SERIALISING is only for read operation */ | 2630 | - atomic_inc(&bs->write_gen); |
59 | - assert(!(flags & BDRV_REQ_NO_SERIALISING)); | 2631 | + qatomic_inc(&bs->write_gen); |
60 | assert(!(bs->open_flags & BDRV_O_INACTIVE)); | 2632 | |
61 | assert((bs->open_flags & BDRV_O_NO_IO) == 0); | 2633 | /* |
62 | assert(!(flags & ~BDRV_REQ_MASK)); | 2634 | * Discard cannot extend the image, but in error handling cases, such as |
63 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_copy_range_internal( | 2635 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs) |
64 | 2636 | } | |
65 | /* BDRV_REQ_SERIALISING is only for write operation */ | 2637 | |
66 | assert(!(read_flags & BDRV_REQ_SERIALISING)); | 2638 | qemu_co_mutex_lock(&bs->reqs_lock); |
67 | - if (!(read_flags & BDRV_REQ_NO_SERIALISING)) { | 2639 | - current_gen = atomic_read(&bs->write_gen); |
68 | - bdrv_wait_serialising_requests(&req); | 2640 | + current_gen = qatomic_read(&bs->write_gen); |
69 | - } | 2641 | |
70 | + bdrv_wait_serialising_requests(&req); | 2642 | /* Wait until any previous flushes are completed */ |
71 | 2643 | while (bs->active_flush_req) { | |
72 | ret = src->bs->drv->bdrv_co_copy_range_from(src->bs, | 2644 | @@ -XXX,XX +XXX,XX @@ void bdrv_io_plug(BlockDriverState *bs) |
73 | src, src_offset, | 2645 | bdrv_io_plug(child->bs); |
74 | diff --git a/include/block/block.h b/include/block/block.h | 2646 | } |
75 | index XXXXXXX..XXXXXXX 100644 | 2647 | |
76 | --- a/include/block/block.h | 2648 | - if (atomic_fetch_inc(&bs->io_plugged) == 0) { |
77 | +++ b/include/block/block.h | 2649 | + if (qatomic_fetch_inc(&bs->io_plugged) == 0) { |
78 | @@ -XXX,XX +XXX,XX @@ typedef enum { | 2650 | BlockDriver *drv = bs->drv; |
2651 | if (drv && drv->bdrv_io_plug) { | ||
2652 | drv->bdrv_io_plug(bs); | ||
2653 | @@ -XXX,XX +XXX,XX @@ void bdrv_io_unplug(BlockDriverState *bs) | ||
2654 | BdrvChild *child; | ||
2655 | |||
2656 | assert(bs->io_plugged); | ||
2657 | - if (atomic_fetch_dec(&bs->io_plugged) == 1) { | ||
2658 | + if (qatomic_fetch_dec(&bs->io_plugged) == 1) { | ||
2659 | BlockDriver *drv = bs->drv; | ||
2660 | if (drv && drv->bdrv_io_unplug) { | ||
2661 | drv->bdrv_io_unplug(bs); | ||
2662 | diff --git a/block/nfs.c b/block/nfs.c | ||
2663 | index XXXXXXX..XXXXXXX 100644 | ||
2664 | --- a/block/nfs.c | ||
2665 | +++ b/block/nfs.c | ||
2666 | @@ -XXX,XX +XXX,XX @@ nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data, | ||
2667 | } | ||
2668 | |||
2669 | /* Set task->complete before reading bs->wakeup. */ | ||
2670 | - atomic_mb_set(&task->complete, 1); | ||
2671 | + qatomic_mb_set(&task->complete, 1); | ||
2672 | bdrv_wakeup(task->bs); | ||
2673 | } | ||
2674 | |||
2675 | diff --git a/block/sheepdog.c b/block/sheepdog.c | ||
2676 | index XXXXXXX..XXXXXXX 100644 | ||
2677 | --- a/block/sheepdog.c | ||
2678 | +++ b/block/sheepdog.c | ||
2679 | @@ -XXX,XX +XXX,XX @@ out: | ||
2680 | srco->co = NULL; | ||
2681 | srco->ret = ret; | ||
2682 | /* Set srco->finished before reading bs->wakeup. */ | ||
2683 | - atomic_mb_set(&srco->finished, true); | ||
2684 | + qatomic_mb_set(&srco->finished, true); | ||
2685 | if (srco->bs) { | ||
2686 | bdrv_wakeup(srco->bs); | ||
2687 | } | ||
2688 | diff --git a/block/throttle-groups.c b/block/throttle-groups.c | ||
2689 | index XXXXXXX..XXXXXXX 100644 | ||
2690 | --- a/block/throttle-groups.c | ||
2691 | +++ b/block/throttle-groups.c | ||
2692 | @@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm, | ||
2693 | * immediately if it has pending requests. Otherwise we could be | ||
2694 | * forcing it to wait for other member's throttled requests. */ | ||
2695 | if (tgm_has_pending_reqs(tgm, is_write) && | ||
2696 | - atomic_read(&tgm->io_limits_disabled)) { | ||
2697 | + qatomic_read(&tgm->io_limits_disabled)) { | ||
2698 | return tgm; | ||
2699 | } | ||
2700 | |||
2701 | @@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm, | ||
2702 | ThrottleTimers *tt = &tgm->throttle_timers; | ||
2703 | bool must_wait; | ||
2704 | |||
2705 | - if (atomic_read(&tgm->io_limits_disabled)) { | ||
2706 | + if (qatomic_read(&tgm->io_limits_disabled)) { | ||
2707 | return false; | ||
2708 | } | ||
2709 | |||
2710 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque) | ||
2711 | |||
2712 | g_free(data); | ||
2713 | |||
2714 | - atomic_dec(&tgm->restart_pending); | ||
2715 | + qatomic_dec(&tgm->restart_pending); | ||
2716 | aio_wait_kick(); | ||
2717 | } | ||
2718 | |||
2719 | @@ -XXX,XX +XXX,XX @@ static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write | ||
2720 | * be no timer pending on this tgm at this point */ | ||
2721 | assert(!timer_pending(tgm->throttle_timers.timers[is_write])); | ||
2722 | |||
2723 | - atomic_inc(&tgm->restart_pending); | ||
2724 | + qatomic_inc(&tgm->restart_pending); | ||
2725 | |||
2726 | co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd); | ||
2727 | aio_co_enter(tgm->aio_context, co); | ||
2728 | @@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm, | ||
2729 | |||
2730 | tgm->throttle_state = ts; | ||
2731 | tgm->aio_context = ctx; | ||
2732 | - atomic_set(&tgm->restart_pending, 0); | ||
2733 | + qatomic_set(&tgm->restart_pending, 0); | ||
2734 | |||
2735 | qemu_mutex_lock(&tg->lock); | ||
2736 | /* If the ThrottleGroup is new set this ThrottleGroupMember as the token */ | ||
2737 | @@ -XXX,XX +XXX,XX @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm) | ||
2738 | } | ||
2739 | |||
2740 | /* Wait for throttle_group_restart_queue_entry() coroutines to finish */ | ||
2741 | - AIO_WAIT_WHILE(tgm->aio_context, atomic_read(&tgm->restart_pending) > 0); | ||
2742 | + AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0); | ||
2743 | |||
2744 | qemu_mutex_lock(&tg->lock); | ||
2745 | for (i = 0; i < 2; i++) { | ||
2746 | diff --git a/block/throttle.c b/block/throttle.c | ||
2747 | index XXXXXXX..XXXXXXX 100644 | ||
2748 | --- a/block/throttle.c | ||
2749 | +++ b/block/throttle.c | ||
2750 | @@ -XXX,XX +XXX,XX @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state) | ||
2751 | static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs) | ||
2752 | { | ||
2753 | ThrottleGroupMember *tgm = bs->opaque; | ||
2754 | - if (atomic_fetch_inc(&tgm->io_limits_disabled) == 0) { | ||
2755 | + if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { | ||
2756 | throttle_group_restart_tgm(tgm); | ||
2757 | } | ||
2758 | } | ||
2759 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs) | ||
2760 | { | ||
2761 | ThrottleGroupMember *tgm = bs->opaque; | ||
2762 | assert(tgm->io_limits_disabled); | ||
2763 | - atomic_dec(&tgm->io_limits_disabled); | ||
2764 | + qatomic_dec(&tgm->io_limits_disabled); | ||
2765 | } | ||
2766 | |||
2767 | static const char *const throttle_strong_runtime_opts[] = { | ||
2768 | diff --git a/blockdev.c b/blockdev.c | ||
2769 | index XXXXXXX..XXXXXXX 100644 | ||
2770 | --- a/blockdev.c | ||
2771 | +++ b/blockdev.c | ||
2772 | @@ -XXX,XX +XXX,XX @@ static void external_snapshot_commit(BlkActionState *common) | ||
2773 | /* We don't need (or want) to use the transactional | ||
2774 | * bdrv_reopen_multiple() across all the entries at once, because we | ||
2775 | * don't want to abort all of them if one of them fails the reopen */ | ||
2776 | - if (!atomic_read(&state->old_bs->copy_on_read)) { | ||
2777 | + if (!qatomic_read(&state->old_bs->copy_on_read)) { | ||
2778 | bdrv_reopen_set_read_only(state->old_bs, true, NULL); | ||
2779 | } | ||
2780 | |||
2781 | diff --git a/blockjob.c b/blockjob.c | ||
2782 | index XXXXXXX..XXXXXXX 100644 | ||
2783 | --- a/blockjob.c | ||
2784 | +++ b/blockjob.c | ||
2785 | @@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp) | ||
2786 | info = g_new0(BlockJobInfo, 1); | ||
2787 | info->type = g_strdup(job_type_str(&job->job)); | ||
2788 | info->device = g_strdup(job->job.id); | ||
2789 | - info->busy = atomic_read(&job->job.busy); | ||
2790 | + info->busy = qatomic_read(&job->job.busy); | ||
2791 | info->paused = job->job.pause_count > 0; | ||
2792 | info->offset = job->job.progress.current; | ||
2793 | info->len = job->job.progress.total; | ||
2794 | diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c | ||
2795 | index XXXXXXX..XXXXXXX 100644 | ||
2796 | --- a/contrib/libvhost-user/libvhost-user.c | ||
2797 | +++ b/contrib/libvhost-user/libvhost-user.c | ||
2798 | @@ -XXX,XX +XXX,XX @@ static void | ||
2799 | vu_log_page(uint8_t *log_table, uint64_t page) | ||
2800 | { | ||
2801 | DPRINT("Logged dirty guest page: %"PRId64"\n", page); | ||
2802 | - atomic_or(&log_table[page / 8], 1 << (page % 8)); | ||
2803 | + qatomic_or(&log_table[page / 8], 1 << (page % 8)); | ||
2804 | } | ||
2805 | |||
2806 | static void | ||
2807 | diff --git a/cpus-common.c b/cpus-common.c | ||
2808 | index XXXXXXX..XXXXXXX 100644 | ||
2809 | --- a/cpus-common.c | ||
2810 | +++ b/cpus-common.c | ||
2811 | @@ -XXX,XX +XXX,XX @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data, | ||
2812 | wi.exclusive = false; | ||
2813 | |||
2814 | queue_work_on_cpu(cpu, &wi); | ||
2815 | - while (!atomic_mb_read(&wi.done)) { | ||
2816 | + while (!qatomic_mb_read(&wi.done)) { | ||
2817 | CPUState *self_cpu = current_cpu; | ||
2818 | |||
2819 | qemu_cond_wait(&qemu_work_cond, mutex); | ||
2820 | @@ -XXX,XX +XXX,XX @@ void start_exclusive(void) | ||
2821 | exclusive_idle(); | ||
2822 | |||
2823 | /* Make all other cpus stop executing. */ | ||
2824 | - atomic_set(&pending_cpus, 1); | ||
2825 | + qatomic_set(&pending_cpus, 1); | ||
2826 | |||
2827 | /* Write pending_cpus before reading other_cpu->running. */ | ||
2828 | smp_mb(); | ||
2829 | running_cpus = 0; | ||
2830 | CPU_FOREACH(other_cpu) { | ||
2831 | - if (atomic_read(&other_cpu->running)) { | ||
2832 | + if (qatomic_read(&other_cpu->running)) { | ||
2833 | other_cpu->has_waiter = true; | ||
2834 | running_cpus++; | ||
2835 | qemu_cpu_kick(other_cpu); | ||
2836 | } | ||
2837 | } | ||
2838 | |||
2839 | - atomic_set(&pending_cpus, running_cpus + 1); | ||
2840 | + qatomic_set(&pending_cpus, running_cpus + 1); | ||
2841 | while (pending_cpus > 1) { | ||
2842 | qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock); | ||
2843 | } | ||
2844 | @@ -XXX,XX +XXX,XX @@ void end_exclusive(void) | ||
2845 | current_cpu->in_exclusive_context = false; | ||
2846 | |||
2847 | qemu_mutex_lock(&qemu_cpu_list_lock); | ||
2848 | - atomic_set(&pending_cpus, 0); | ||
2849 | + qatomic_set(&pending_cpus, 0); | ||
2850 | qemu_cond_broadcast(&exclusive_resume); | ||
2851 | qemu_mutex_unlock(&qemu_cpu_list_lock); | ||
2852 | } | ||
2853 | @@ -XXX,XX +XXX,XX @@ void end_exclusive(void) | ||
2854 | /* Wait for exclusive ops to finish, and begin cpu execution. */ | ||
2855 | void cpu_exec_start(CPUState *cpu) | ||
2856 | { | ||
2857 | - atomic_set(&cpu->running, true); | ||
2858 | + qatomic_set(&cpu->running, true); | ||
2859 | |||
2860 | /* Write cpu->running before reading pending_cpus. */ | ||
2861 | smp_mb(); | ||
2862 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu) | ||
2863 | * 3. pending_cpus == 0. Then start_exclusive is definitely going to | ||
2864 | * see cpu->running == true, and it will kick the CPU. | ||
79 | */ | 2865 | */ |
80 | BDRV_REQ_MAY_UNMAP = 0x4, | 2866 | - if (unlikely(atomic_read(&pending_cpus))) { |
81 | 2867 | + if (unlikely(qatomic_read(&pending_cpus))) { | |
82 | - /* | 2868 | QEMU_LOCK_GUARD(&qemu_cpu_list_lock); |
83 | - * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that | 2869 | if (!cpu->has_waiter) { |
84 | - * we don't want wait_serialising_requests() during the read operation. | 2870 | /* Not counted in pending_cpus, let the exclusive item |
85 | - * | 2871 | * run. Since we have the lock, just set cpu->running to true |
86 | - * This flag is used for backup copy-on-write operations, when we need to | 2872 | * while holding it; no need to check pending_cpus again. |
87 | - * read old data before write (write notifier triggered). It is okay since | 2873 | */ |
88 | - * we already waited for other serializing requests in the initiating write | 2874 | - atomic_set(&cpu->running, false); |
89 | - * (see bdrv_aligned_pwritev), and it is necessary if the initiating write | 2875 | + qatomic_set(&cpu->running, false); |
90 | - * is already serializing (without the flag, the read would deadlock | 2876 | exclusive_idle(); |
91 | - * waiting for the serialising write to complete). | 2877 | /* Now pending_cpus is zero. */ |
92 | - */ | 2878 | - atomic_set(&cpu->running, true); |
93 | - BDRV_REQ_NO_SERIALISING = 0x8, | 2879 | + qatomic_set(&cpu->running, true); |
94 | BDRV_REQ_FUA = 0x10, | 2880 | } else { |
95 | BDRV_REQ_WRITE_COMPRESSED = 0x20, | 2881 | /* Counted in pending_cpus, go ahead and release the |
96 | 2882 | * waiter at cpu_exec_end. | |
2883 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu) | ||
2884 | /* Mark cpu as not executing, and release pending exclusive ops. */ | ||
2885 | void cpu_exec_end(CPUState *cpu) | ||
2886 | { | ||
2887 | - atomic_set(&cpu->running, false); | ||
2888 | + qatomic_set(&cpu->running, false); | ||
2889 | |||
2890 | /* Write cpu->running before reading pending_cpus. */ | ||
2891 | smp_mb(); | ||
2892 | @@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu) | ||
2893 | * see cpu->running == false, and it can ignore this CPU until the | ||
2894 | * next cpu_exec_start. | ||
2895 | */ | ||
2896 | - if (unlikely(atomic_read(&pending_cpus))) { | ||
2897 | + if (unlikely(qatomic_read(&pending_cpus))) { | ||
2898 | QEMU_LOCK_GUARD(&qemu_cpu_list_lock); | ||
2899 | if (cpu->has_waiter) { | ||
2900 | cpu->has_waiter = false; | ||
2901 | - atomic_set(&pending_cpus, pending_cpus - 1); | ||
2902 | + qatomic_set(&pending_cpus, pending_cpus - 1); | ||
2903 | if (pending_cpus == 1) { | ||
2904 | qemu_cond_signal(&exclusive_cond); | ||
2905 | } | ||
2906 | @@ -XXX,XX +XXX,XX @@ void process_queued_cpu_work(CPUState *cpu) | ||
2907 | if (wi->free) { | ||
2908 | g_free(wi); | ||
2909 | } else { | ||
2910 | - atomic_mb_set(&wi->done, true); | ||
2911 | + qatomic_mb_set(&wi->done, true); | ||
2912 | } | ||
2913 | } | ||
2914 | qemu_mutex_unlock(&cpu->work_mutex); | ||
2915 | diff --git a/dump/dump.c b/dump/dump.c | ||
2916 | index XXXXXXX..XXXXXXX 100644 | ||
2917 | --- a/dump/dump.c | ||
2918 | +++ b/dump/dump.c | ||
2919 | @@ -XXX,XX +XXX,XX @@ static void dump_state_prepare(DumpState *s) | ||
2920 | bool dump_in_progress(void) | ||
2921 | { | ||
2922 | DumpState *state = &dump_state_global; | ||
2923 | - return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE); | ||
2924 | + return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE); | ||
2925 | } | ||
2926 | |||
2927 | /* calculate total size of memory to be dumped (taking filter into | ||
2928 | @@ -XXX,XX +XXX,XX @@ static void dump_process(DumpState *s, Error **errp) | ||
2929 | |||
2930 | /* make sure status is written after written_size updates */ | ||
2931 | smp_wmb(); | ||
2932 | - atomic_set(&s->status, | ||
2933 | + qatomic_set(&s->status, | ||
2934 | (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); | ||
2935 | |||
2936 | /* send DUMP_COMPLETED message (unconditionally) */ | ||
2937 | @@ -XXX,XX +XXX,XX @@ DumpQueryResult *qmp_query_dump(Error **errp) | ||
2938 | { | ||
2939 | DumpQueryResult *result = g_new(DumpQueryResult, 1); | ||
2940 | DumpState *state = &dump_state_global; | ||
2941 | - result->status = atomic_read(&state->status); | ||
2942 | + result->status = qatomic_read(&state->status); | ||
2943 | /* make sure we are reading status and written_size in order */ | ||
2944 | smp_rmb(); | ||
2945 | result->completed = state->written_size; | ||
2946 | @@ -XXX,XX +XXX,XX @@ void qmp_dump_guest_memory(bool paging, const char *file, | ||
2947 | begin, length, &local_err); | ||
2948 | if (local_err) { | ||
2949 | error_propagate(errp, local_err); | ||
2950 | - atomic_set(&s->status, DUMP_STATUS_FAILED); | ||
2951 | + qatomic_set(&s->status, DUMP_STATUS_FAILED); | ||
2952 | return; | ||
2953 | } | ||
2954 | |||
2955 | diff --git a/exec.c b/exec.c | ||
2956 | index XXXXXXX..XXXXXXX 100644 | ||
2957 | --- a/exec.c | ||
2958 | +++ b/exec.c | ||
2959 | @@ -XXX,XX +XXX,XX @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, | ||
2960 | hwaddr addr, | ||
2961 | bool resolve_subpage) | ||
2962 | { | ||
2963 | - MemoryRegionSection *section = atomic_read(&d->mru_section); | ||
2964 | + MemoryRegionSection *section = qatomic_read(&d->mru_section); | ||
2965 | subpage_t *subpage; | ||
2966 | |||
2967 | if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] || | ||
2968 | !section_covers_addr(section, addr)) { | ||
2969 | section = phys_page_find(d, addr); | ||
2970 | - atomic_set(&d->mru_section, section); | ||
2971 | + qatomic_set(&d->mru_section, section); | ||
2972 | } | ||
2973 | if (resolve_subpage && section->mr->subpage) { | ||
2974 | subpage = container_of(section->mr, subpage_t, iomem); | ||
2975 | @@ -XXX,XX +XXX,XX @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, | ||
2976 | IOMMUMemoryRegionClass *imrc; | ||
2977 | IOMMUTLBEntry iotlb; | ||
2978 | int iommu_idx; | ||
2979 | - AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); | ||
2980 | + AddressSpaceDispatch *d = | ||
2981 | + qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); | ||
2982 | |||
2983 | for (;;) { | ||
2984 | section = address_space_translate_internal(d, addr, &addr, plen, false); | ||
2985 | @@ -XXX,XX +XXX,XX @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr) | ||
2986 | { | ||
2987 | RAMBlock *block; | ||
2988 | |||
2989 | - block = atomic_rcu_read(&ram_list.mru_block); | ||
2990 | + block = qatomic_rcu_read(&ram_list.mru_block); | ||
2991 | if (block && addr - block->offset < block->max_length) { | ||
2992 | return block; | ||
2993 | } | ||
2994 | @@ -XXX,XX +XXX,XX @@ found: | ||
2995 | * call_rcu(reclaim_ramblock, xxx); | ||
2996 | * rcu_read_unlock() | ||
2997 | * | ||
2998 | - * atomic_rcu_set is not needed here. The block was already published | ||
2999 | + * qatomic_rcu_set is not needed here. The block was already published | ||
3000 | * when it was placed into the list. Here we're just making an extra | ||
3001 | * copy of the pointer. | ||
3002 | */ | ||
3003 | @@ -XXX,XX +XXX,XX @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, | ||
3004 | page = start_page; | ||
3005 | |||
3006 | WITH_RCU_READ_LOCK_GUARD() { | ||
3007 | - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | ||
3008 | + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); | ||
3009 | ramblock = qemu_get_ram_block(start); | ||
3010 | /* Range sanity check on the ramblock */ | ||
3011 | assert(start >= ramblock->offset && | ||
3012 | @@ -XXX,XX +XXX,XX @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty | ||
3013 | dest = 0; | ||
3014 | |||
3015 | WITH_RCU_READ_LOCK_GUARD() { | ||
3016 | - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | ||
3017 | + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); | ||
3018 | |||
3019 | while (page < end) { | ||
3020 | unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; | ||
3021 | @@ -XXX,XX +XXX,XX @@ static void dirty_memory_extend(ram_addr_t old_ram_size, | ||
3022 | DirtyMemoryBlocks *new_blocks; | ||
3023 | int j; | ||
3024 | |||
3025 | - old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]); | ||
3026 | + old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]); | ||
3027 | new_blocks = g_malloc(sizeof(*new_blocks) + | ||
3028 | sizeof(new_blocks->blocks[0]) * new_num_blocks); | ||
3029 | |||
3030 | @@ -XXX,XX +XXX,XX @@ static void dirty_memory_extend(ram_addr_t old_ram_size, | ||
3031 | new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); | ||
3032 | } | ||
3033 | |||
3034 | - atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); | ||
3035 | + qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); | ||
3036 | |||
3037 | if (old_blocks) { | ||
3038 | g_free_rcu(old_blocks, rcu); | ||
3039 | @@ -XXX,XX +XXX,XX @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, | ||
3040 | } | ||
3041 | |||
3042 | RCU_READ_LOCK_GUARD(); | ||
3043 | - block = atomic_rcu_read(&ram_list.mru_block); | ||
3044 | + block = qatomic_rcu_read(&ram_list.mru_block); | ||
3045 | if (block && block->host && host - block->host < block->max_length) { | ||
3046 | goto found; | ||
3047 | } | ||
3048 | @@ -XXX,XX +XXX,XX @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu, | ||
3049 | { | ||
3050 | int asidx = cpu_asidx_from_attrs(cpu, attrs); | ||
3051 | CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; | ||
3052 | - AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch); | ||
3053 | + AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch); | ||
3054 | MemoryRegionSection *sections = d->map.sections; | ||
3055 | |||
3056 | return &sections[index & ~TARGET_PAGE_MASK]; | ||
3057 | @@ -XXX,XX +XXX,XX @@ static void tcg_commit(MemoryListener *listener) | ||
3058 | * may have split the RCU critical section. | ||
3059 | */ | ||
3060 | d = address_space_to_dispatch(cpuas->as); | ||
3061 | - atomic_rcu_set(&cpuas->memory_dispatch, d); | ||
3062 | + qatomic_rcu_set(&cpuas->memory_dispatch, d); | ||
3063 | tlb_flush(cpuas->cpu); | ||
3064 | } | ||
3065 | |||
3066 | @@ -XXX,XX +XXX,XX @@ void cpu_register_map_client(QEMUBH *bh) | ||
3067 | qemu_mutex_lock(&map_client_list_lock); | ||
3068 | client->bh = bh; | ||
3069 | QLIST_INSERT_HEAD(&map_client_list, client, link); | ||
3070 | - if (!atomic_read(&bounce.in_use)) { | ||
3071 | + if (!qatomic_read(&bounce.in_use)) { | ||
3072 | cpu_notify_map_clients_locked(); | ||
3073 | } | ||
3074 | qemu_mutex_unlock(&map_client_list_lock); | ||
3075 | @@ -XXX,XX +XXX,XX @@ void *address_space_map(AddressSpace *as, | ||
3076 | mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); | ||
3077 | |||
3078 | if (!memory_access_is_direct(mr, is_write)) { | ||
3079 | - if (atomic_xchg(&bounce.in_use, true)) { | ||
3080 | + if (qatomic_xchg(&bounce.in_use, true)) { | ||
3081 | *plen = 0; | ||
3082 | return NULL; | ||
3083 | } | ||
3084 | @@ -XXX,XX +XXX,XX @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, | ||
3085 | qemu_vfree(bounce.buffer); | ||
3086 | bounce.buffer = NULL; | ||
3087 | memory_region_unref(bounce.mr); | ||
3088 | - atomic_mb_set(&bounce.in_use, false); | ||
3089 | + qatomic_mb_set(&bounce.in_use, false); | ||
3090 | cpu_notify_map_clients(); | ||
3091 | } | ||
3092 | |||
3093 | @@ -XXX,XX +XXX,XX @@ int ram_block_discard_disable(bool state) | ||
3094 | int old; | ||
3095 | |||
3096 | if (!state) { | ||
3097 | - atomic_dec(&ram_block_discard_disabled); | ||
3098 | + qatomic_dec(&ram_block_discard_disabled); | ||
3099 | return 0; | ||
3100 | } | ||
3101 | |||
3102 | do { | ||
3103 | - old = atomic_read(&ram_block_discard_disabled); | ||
3104 | + old = qatomic_read(&ram_block_discard_disabled); | ||
3105 | if (old < 0) { | ||
3106 | return -EBUSY; | ||
3107 | } | ||
3108 | - } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old + 1) != old); | ||
3109 | + } while (qatomic_cmpxchg(&ram_block_discard_disabled, | ||
3110 | + old, old + 1) != old); | ||
3111 | return 0; | ||
3112 | } | ||
3113 | |||
3114 | @@ -XXX,XX +XXX,XX @@ int ram_block_discard_require(bool state) | ||
3115 | int old; | ||
3116 | |||
3117 | if (!state) { | ||
3118 | - atomic_inc(&ram_block_discard_disabled); | ||
3119 | + qatomic_inc(&ram_block_discard_disabled); | ||
3120 | return 0; | ||
3121 | } | ||
3122 | |||
3123 | do { | ||
3124 | - old = atomic_read(&ram_block_discard_disabled); | ||
3125 | + old = qatomic_read(&ram_block_discard_disabled); | ||
3126 | if (old > 0) { | ||
3127 | return -EBUSY; | ||
3128 | } | ||
3129 | - } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old - 1) != old); | ||
3130 | + } while (qatomic_cmpxchg(&ram_block_discard_disabled, | ||
3131 | + old, old - 1) != old); | ||
3132 | return 0; | ||
3133 | } | ||
3134 | |||
3135 | bool ram_block_discard_is_disabled(void) | ||
3136 | { | ||
3137 | - return atomic_read(&ram_block_discard_disabled) > 0; | ||
3138 | + return qatomic_read(&ram_block_discard_disabled) > 0; | ||
3139 | } | ||
3140 | |||
3141 | bool ram_block_discard_is_required(void) | ||
3142 | { | ||
3143 | - return atomic_read(&ram_block_discard_disabled) < 0; | ||
3144 | + return qatomic_read(&ram_block_discard_disabled) < 0; | ||
3145 | } | ||
3146 | |||
3147 | #endif | ||
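
[Editor's note] The ram_block_discard_disable()/require() hunks above are a good
illustration of the compare-and-swap retry loop that qatomic_cmpxchg() is used for
throughout this series: a single signed counter encodes two mutually exclusive
states (positive = discarding disabled, negative = discarding required), and each
side only moves the counter further in its own direction when the other side is
inactive. The sketch below reproduces the idiom with plain C11 <stdatomic.h>
rather than QEMU's qemu/atomic.h wrappers; the names (block_discard_disable(),
discard_state, ...) are illustrative only, not QEMU's.

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* >0: discarding disabled, <0: discarding required, 0: neither. */
    static atomic_int discard_state;

    static int block_discard_disable(void)
    {
        int old = atomic_load(&discard_state);

        do {
            if (old < 0) {
                return -EBUSY;          /* someone requires discards */
            }
            /* on failure, 'old' is updated to the value actually seen */
        } while (!atomic_compare_exchange_weak(&discard_state, &old, old + 1));
        return 0;
    }

    static int block_discard_require(void)
    {
        int old = atomic_load(&discard_state);

        do {
            if (old > 0) {
                return -EBUSY;          /* discards are currently disabled */
            }
        } while (!atomic_compare_exchange_weak(&discard_state, &old, old - 1));
        return 0;
    }

    int main(void)
    {
        printf("disable: %d\n", block_discard_disable());  /* 0 */
        printf("require: %d\n", block_discard_require());  /* -EBUSY */
        return 0;
    }

atomic_compare_exchange_weak() refreshes 'old' with the current contents on
failure, which is why the loop does not need to re-read the counter explicitly.
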
3148 | diff --git a/hw/core/cpu.c b/hw/core/cpu.c | ||
3149 | index XXXXXXX..XXXXXXX 100644 | ||
3150 | --- a/hw/core/cpu.c | ||
3151 | +++ b/hw/core/cpu.c | ||
3152 | @@ -XXX,XX +XXX,XX @@ void cpu_reset_interrupt(CPUState *cpu, int mask) | ||
3153 | |||
3154 | void cpu_exit(CPUState *cpu) | ||
3155 | { | ||
3156 | - atomic_set(&cpu->exit_request, 1); | ||
3157 | + qatomic_set(&cpu->exit_request, 1); | ||
3158 | /* Ensure cpu_exec will see the exit request after TCG has exited. */ | ||
3159 | smp_wmb(); | ||
3160 | - atomic_set(&cpu->icount_decr_ptr->u16.high, -1); | ||
3161 | + qatomic_set(&cpu->icount_decr_ptr->u16.high, -1); | ||
3162 | } | ||
3163 | |||
3164 | int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, | ||
3165 | @@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(DeviceState *dev) | ||
3166 | cpu->halted = cpu->start_powered_off; | ||
3167 | cpu->mem_io_pc = 0; | ||
3168 | cpu->icount_extra = 0; | ||
3169 | - atomic_set(&cpu->icount_decr_ptr->u32, 0); | ||
3170 | + qatomic_set(&cpu->icount_decr_ptr->u32, 0); | ||
3171 | cpu->can_do_io = 1; | ||
3172 | cpu->exception_index = -1; | ||
3173 | cpu->crash_occurred = false; | ||
3174 | diff --git a/hw/display/qxl.c b/hw/display/qxl.c | ||
3175 | index XXXXXXX..XXXXXXX 100644 | ||
3176 | --- a/hw/display/qxl.c | ||
3177 | +++ b/hw/display/qxl.c | ||
3178 | @@ -XXX,XX +XXX,XX @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events) | ||
3179 | /* | ||
3180 | * Older versions of Spice forgot to define the QXLRam struct | ||
3181 | * with the '__aligned__(4)' attribute. clang 7 and newer will | ||
3182 | - * thus warn that atomic_fetch_or(&d->ram->int_pending, ...) | ||
3183 | + * thus warn that qatomic_fetch_or(&d->ram->int_pending, ...) | ||
3184 | * might be a misaligned atomic access, and will generate an | ||
3185 | * out-of-line call for it, which results in a link error since | ||
3186 | * we don't currently link against libatomic. | ||
3187 | @@ -XXX,XX +XXX,XX @@ static void qxl_send_events(PCIQXLDevice *d, uint32_t events) | ||
3188 | #define ALIGNED_UINT32_PTR(P) ((uint32_t *)P) | ||
3189 | #endif | ||
3190 | |||
3191 | - old_pending = atomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending), | ||
3192 | + old_pending = qatomic_fetch_or(ALIGNED_UINT32_PTR(&d->ram->int_pending), | ||
3193 | le_events); | ||
3194 | if ((old_pending & le_events) == le_events) { | ||
3195 | return; | ||
3196 | diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c | ||
3197 | index XXXXXXX..XXXXXXX 100644 | ||
3198 | --- a/hw/hyperv/hyperv.c | ||
3199 | +++ b/hw/hyperv/hyperv.c | ||
3200 | @@ -XXX,XX +XXX,XX @@ static void sint_msg_bh(void *opaque) | ||
3201 | HvSintRoute *sint_route = opaque; | ||
3202 | HvSintStagedMessage *staged_msg = sint_route->staged_msg; | ||
3203 | |||
3204 | - if (atomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) { | ||
3205 | + if (qatomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) { | ||
3206 | /* status not ready yet (spurious ack from guest?), ignore */ | ||
3207 | return; | ||
3208 | } | ||
3209 | @@ -XXX,XX +XXX,XX @@ static void sint_msg_bh(void *opaque) | ||
3210 | staged_msg->status = 0; | ||
3211 | |||
3212 | /* staged message processing finished, ready to start over */ | ||
3213 | - atomic_set(&staged_msg->state, HV_STAGED_MSG_FREE); | ||
3214 | + qatomic_set(&staged_msg->state, HV_STAGED_MSG_FREE); | ||
3215 | /* drop the reference taken in hyperv_post_msg */ | ||
3216 | hyperv_sint_route_unref(sint_route); | ||
3217 | } | ||
3218 | @@ -XXX,XX +XXX,XX @@ static void cpu_post_msg(CPUState *cs, run_on_cpu_data data) | ||
3219 | memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page)); | ||
3220 | |||
3221 | posted: | ||
3222 | - atomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED); | ||
3223 | + qatomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED); | ||
3224 | /* | ||
3225 | * Notify the msg originator of the progress made; if the slot was busy we | ||
3226 | * set msg_pending flag in it so it will be the guest who will do EOM and | ||
3227 | @@ -XXX,XX +XXX,XX @@ int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg) | ||
3228 | assert(staged_msg); | ||
3229 | |||
3230 | /* grab the staging area */ | ||
3231 | - if (atomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE, | ||
3232 | + if (qatomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE, | ||
3233 | HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) { | ||
3234 | return -EAGAIN; | ||
3235 | } | ||
3236 | @@ -XXX,XX +XXX,XX @@ int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) | ||
3237 | set_mask = BIT_MASK(eventno); | ||
3238 | flags = synic->event_page->slot[sint_route->sint].flags; | ||
3239 | |||
3240 | - if ((atomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) { | ||
3241 | + if ((qatomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) { | ||
3242 | memory_region_set_dirty(&synic->event_page_mr, 0, | ||
3243 | sizeof(*synic->event_page)); | ||
3244 | ret = hyperv_sint_route_set_sint(sint_route); | ||
3245 | diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c | ||
3246 | index XXXXXXX..XXXXXXX 100644 | ||
3247 | --- a/hw/hyperv/vmbus.c | ||
3248 | +++ b/hw/hyperv/vmbus.c | ||
3249 | @@ -XXX,XX +XXX,XX @@ static int vmbus_channel_notify_guest(VMBusChannel *chan) | ||
3250 | |||
3251 | idx = BIT_WORD(chan->id); | ||
3252 | mask = BIT_MASK(chan->id); | ||
3253 | - if ((atomic_fetch_or(&int_map[idx], mask) & mask) != mask) { | ||
3254 | + if ((qatomic_fetch_or(&int_map[idx], mask) & mask) != mask) { | ||
3255 | res = hyperv_sint_route_set_sint(chan->notify_route); | ||
3256 | dirty = len; | ||
3257 | } | ||
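
[Editor's note] Both the hyperv.c and vmbus.c hunks above lean on qatomic_fetch_or()
for a "set a bit, notify only if it was not already set" idiom: the returned old
value tells the caller whether it was the one that actually flipped the bit, so the
comparatively expensive SINT notification is raised only once. A minimal standalone
version of the same idiom, using C11 atomics and a made-up notify_guest()/
set_event_bit() pair, could look like this:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t int_map;

    static void notify_guest(void)
    {
        puts("notify");     /* stand-in for the expensive signalling path */
    }

    static void set_event_bit(unsigned bit)
    {
        uint32_t mask = 1u << bit;

        /* fetch_or returns the *previous* value: if the bit was already set,
         * another caller has signalled (or will signal) the guest already. */
        if ((atomic_fetch_or(&int_map, mask) & mask) != mask) {
            notify_guest();
        }
    }

    int main(void)
    {
        set_event_bit(3);   /* prints "notify" */
        set_event_bit(3);   /* bit already set: no duplicate notification */
        return 0;
    }
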
3258 | diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c | ||
3259 | index XXXXXXX..XXXXXXX 100644 | ||
3260 | --- a/hw/i386/xen/xen-hvm.c | ||
3261 | +++ b/hw/i386/xen/xen-hvm.c | ||
3262 | @@ -XXX,XX +XXX,XX @@ static int handle_buffered_iopage(XenIOState *state) | ||
3263 | assert(req.dir == IOREQ_WRITE); | ||
3264 | assert(!req.data_is_ptr); | ||
3265 | |||
3266 | - atomic_add(&buf_page->read_pointer, qw + 1); | ||
3267 | + qatomic_add(&buf_page->read_pointer, qw + 1); | ||
3268 | } | ||
3269 | |||
3270 | return req.count; | ||
3271 | diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c | ||
3272 | index XXXXXXX..XXXXXXX 100644 | ||
3273 | --- a/hw/intc/rx_icu.c | ||
3274 | +++ b/hw/intc/rx_icu.c | ||
3275 | @@ -XXX,XX +XXX,XX @@ static void rxicu_request(RXICUState *icu, int n_IRQ) | ||
3276 | int enable; | ||
3277 | |||
3278 | enable = icu->ier[n_IRQ / 8] & (1 << (n_IRQ & 7)); | ||
3279 | - if (n_IRQ > 0 && enable != 0 && atomic_read(&icu->req_irq) < 0) { | ||
3280 | - atomic_set(&icu->req_irq, n_IRQ); | ||
3281 | + if (n_IRQ > 0 && enable != 0 && qatomic_read(&icu->req_irq) < 0) { | ||
3282 | + qatomic_set(&icu->req_irq, n_IRQ); | ||
3283 | set_irq(icu, n_IRQ, rxicu_level(icu, n_IRQ)); | ||
3284 | } | ||
3285 | } | ||
3286 | @@ -XXX,XX +XXX,XX @@ static void rxicu_set_irq(void *opaque, int n_IRQ, int level) | ||
3287 | } | ||
3288 | if (issue == 0 && src->sense == TRG_LEVEL) { | ||
3289 | icu->ir[n_IRQ] = 0; | ||
3290 | - if (atomic_read(&icu->req_irq) == n_IRQ) { | ||
3291 | + if (qatomic_read(&icu->req_irq) == n_IRQ) { | ||
3292 | /* clear request */ | ||
3293 | set_irq(icu, n_IRQ, 0); | ||
3294 | - atomic_set(&icu->req_irq, -1); | ||
3295 | + qatomic_set(&icu->req_irq, -1); | ||
3296 | } | ||
3297 | return; | ||
3298 | } | ||
3299 | @@ -XXX,XX +XXX,XX @@ static void rxicu_ack_irq(void *opaque, int no, int level) | ||
3300 | int n_IRQ; | ||
3301 | int max_pri; | ||
3302 | |||
3303 | - n_IRQ = atomic_read(&icu->req_irq); | ||
3304 | + n_IRQ = qatomic_read(&icu->req_irq); | ||
3305 | if (n_IRQ < 0) { | ||
3306 | return; | ||
3307 | } | ||
3308 | - atomic_set(&icu->req_irq, -1); | ||
3309 | + qatomic_set(&icu->req_irq, -1); | ||
3310 | if (icu->src[n_IRQ].sense != TRG_LEVEL) { | ||
3311 | icu->ir[n_IRQ] = 0; | ||
3312 | } | ||
3313 | diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c | ||
3314 | index XXXXXXX..XXXXXXX 100644 | ||
3315 | --- a/hw/intc/sifive_plic.c | ||
3316 | +++ b/hw/intc/sifive_plic.c | ||
3317 | @@ -XXX,XX +XXX,XX @@ static void sifive_plic_print_state(SiFivePLICState *plic) | ||
3318 | |||
3319 | static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value) | ||
3320 | { | ||
3321 | - uint32_t old, new, cmp = atomic_read(a); | ||
3322 | + uint32_t old, new, cmp = qatomic_read(a); | ||
3323 | |||
3324 | do { | ||
3325 | old = cmp; | ||
3326 | new = (old & ~mask) | (value & mask); | ||
3327 | - cmp = atomic_cmpxchg(a, old, new); | ||
3328 | + cmp = qatomic_cmpxchg(a, old, new); | ||
3329 | } while (old != cmp); | ||
3330 | |||
3331 | return old; | ||
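
[Editor's note] atomic_set_masked() above is the other common cmpxchg shape in this
patch: a lock-free read-modify-write that replaces only the bits selected by a mask
and returns the previous value. Roughly the same loop in self-contained C11 form
(a sketch, not the PLIC code itself):

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Atomically replace only the bits selected by @mask; return the old value. */
    static uint32_t set_masked(_Atomic uint32_t *a, uint32_t mask, uint32_t value)
    {
        uint32_t old = atomic_load(a);
        uint32_t new;

        do {
            new = (old & ~mask) | (value & mask);
            /* on failure, 'old' is refreshed with the current contents of *a */
        } while (!atomic_compare_exchange_weak(a, &old, new));

        return old;
    }

    int main(void)
    {
        _Atomic uint32_t pending = 0xff00ff00u;
        uint32_t prev = set_masked(&pending, 0x0000ffffu, 0x00001234u);

        printf("prev=%08" PRIx32 " now=%08" PRIx32 "\n",
               prev, atomic_load(&pending));
        return 0;
    }

As in the PLIC helper, the loop simply retries until no other writer slipped in
between the load and the compare-and-swap.
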
3332 | diff --git a/hw/misc/edu.c b/hw/misc/edu.c | ||
3333 | index XXXXXXX..XXXXXXX 100644 | ||
3334 | --- a/hw/misc/edu.c | ||
3335 | +++ b/hw/misc/edu.c | ||
3336 | @@ -XXX,XX +XXX,XX @@ static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size) | ||
3337 | qemu_mutex_unlock(&edu->thr_mutex); | ||
3338 | break; | ||
3339 | case 0x20: | ||
3340 | - val = atomic_read(&edu->status); | ||
3341 | + val = qatomic_read(&edu->status); | ||
3342 | break; | ||
3343 | case 0x24: | ||
3344 | val = edu->irq_status; | ||
3345 | @@ -XXX,XX +XXX,XX @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val, | ||
3346 | edu->addr4 = ~val; | ||
3347 | break; | ||
3348 | case 0x08: | ||
3349 | - if (atomic_read(&edu->status) & EDU_STATUS_COMPUTING) { | ||
3350 | + if (qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) { | ||
3351 | break; | ||
3352 | } | ||
3353 | /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only | ||
3354 | @@ -XXX,XX +XXX,XX @@ static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val, | ||
3355 | */ | ||
3356 | qemu_mutex_lock(&edu->thr_mutex); | ||
3357 | edu->fact = val; | ||
3358 | - atomic_or(&edu->status, EDU_STATUS_COMPUTING); | ||
3359 | + qatomic_or(&edu->status, EDU_STATUS_COMPUTING); | ||
3360 | qemu_cond_signal(&edu->thr_cond); | ||
3361 | qemu_mutex_unlock(&edu->thr_mutex); | ||
3362 | break; | ||
3363 | case 0x20: | ||
3364 | if (val & EDU_STATUS_IRQFACT) { | ||
3365 | - atomic_or(&edu->status, EDU_STATUS_IRQFACT); | ||
3366 | + qatomic_or(&edu->status, EDU_STATUS_IRQFACT); | ||
3367 | } else { | ||
3368 | - atomic_and(&edu->status, ~EDU_STATUS_IRQFACT); | ||
3369 | + qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT); | ||
3370 | } | ||
3371 | break; | ||
3372 | case 0x60: | ||
3373 | @@ -XXX,XX +XXX,XX @@ static void *edu_fact_thread(void *opaque) | ||
3374 | uint32_t val, ret = 1; | ||
3375 | |||
3376 | qemu_mutex_lock(&edu->thr_mutex); | ||
3377 | - while ((atomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 && | ||
3378 | + while ((qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 && | ||
3379 | !edu->stopping) { | ||
3380 | qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex); | ||
3381 | } | ||
3382 | @@ -XXX,XX +XXX,XX @@ static void *edu_fact_thread(void *opaque) | ||
3383 | qemu_mutex_lock(&edu->thr_mutex); | ||
3384 | edu->fact = ret; | ||
3385 | qemu_mutex_unlock(&edu->thr_mutex); | ||
3386 | - atomic_and(&edu->status, ~EDU_STATUS_COMPUTING); | ||
3387 | + qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING); | ||
3388 | |||
3389 | - if (atomic_read(&edu->status) & EDU_STATUS_IRQFACT) { | ||
3390 | + if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { | ||
3391 | qemu_mutex_lock_iothread(); | ||
3392 | edu_raise_irq(edu, FACT_IRQ); | ||
3393 | qemu_mutex_unlock_iothread(); | ||
3394 | diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c | ||
3395 | index XXXXXXX..XXXXXXX 100644 | ||
3396 | --- a/hw/net/virtio-net.c | ||
3397 | +++ b/hw/net/virtio-net.c | ||
3398 | @@ -XXX,XX +XXX,XX @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) | ||
3399 | |||
3400 | if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) { | ||
3401 | qapi_event_send_failover_negotiated(n->netclient_name); | ||
3402 | - atomic_set(&n->primary_should_be_hidden, false); | ||
3403 | + qatomic_set(&n->primary_should_be_hidden, false); | ||
3404 | failover_add_primary(n, &err); | ||
3405 | if (err) { | ||
3406 | n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err); | ||
3407 | @@ -XXX,XX +XXX,XX @@ static void virtio_net_handle_migration_primary(VirtIONet *n, | ||
3408 | bool should_be_hidden; | ||
3409 | Error *err = NULL; | ||
3410 | |||
3411 | - should_be_hidden = atomic_read(&n->primary_should_be_hidden); | ||
3412 | + should_be_hidden = qatomic_read(&n->primary_should_be_hidden); | ||
3413 | |||
3414 | if (!n->primary_dev) { | ||
3415 | n->primary_dev = virtio_connect_failover_devices(n, n->qdev, &err); | ||
3416 | @@ -XXX,XX +XXX,XX @@ static void virtio_net_handle_migration_primary(VirtIONet *n, | ||
3417 | qdev_get_vmsd(n->primary_dev), | ||
3418 | n->primary_dev); | ||
3419 | qapi_event_send_unplug_primary(n->primary_device_id); | ||
3420 | - atomic_set(&n->primary_should_be_hidden, true); | ||
3421 | + qatomic_set(&n->primary_should_be_hidden, true); | ||
3422 | } else { | ||
3423 | warn_report("couldn't unplug primary device"); | ||
3424 | } | ||
3425 | @@ -XXX,XX +XXX,XX @@ static int virtio_net_primary_should_be_hidden(DeviceListener *listener, | ||
3426 | n->primary_device_opts = device_opts; | ||
3427 | |||
3428 | /* primary_should_be_hidden is set during feature negotiation */ | ||
3429 | - hide = atomic_read(&n->primary_should_be_hidden); | ||
3430 | + hide = qatomic_read(&n->primary_should_be_hidden); | ||
3431 | |||
3432 | if (n->primary_device_dict) { | ||
3433 | g_free(n->primary_device_id); | ||
3434 | @@ -XXX,XX +XXX,XX @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) | ||
3435 | if (n->failover) { | ||
3436 | n->primary_listener.should_be_hidden = | ||
3437 | virtio_net_primary_should_be_hidden; | ||
3438 | - atomic_set(&n->primary_should_be_hidden, true); | ||
3439 | + qatomic_set(&n->primary_should_be_hidden, true); | ||
3440 | device_listener_register(&n->primary_listener); | ||
3441 | n->migration_state.notify = virtio_net_migration_state_notifier; | ||
3442 | add_migration_state_change_notifier(&n->migration_state); | ||
3443 | diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c | ||
3444 | index XXXXXXX..XXXXXXX 100644 | ||
3445 | --- a/hw/rdma/rdma_backend.c | ||
3446 | +++ b/hw/rdma/rdma_backend.c | ||
3447 | @@ -XXX,XX +XXX,XX @@ static void free_cqe_ctx(gpointer data, gpointer user_data) | ||
3448 | bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id); | ||
3449 | if (bctx) { | ||
3450 | rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id); | ||
3451 | - atomic_dec(&rdma_dev_res->stats.missing_cqe); | ||
3452 | + qatomic_dec(&rdma_dev_res->stats.missing_cqe); | ||
3453 | } | ||
3454 | g_free(bctx); | ||
3455 | } | ||
3456 | @@ -XXX,XX +XXX,XX @@ static void clean_recv_mads(RdmaBackendDev *backend_dev) | ||
3457 | cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev-> | ||
3458 | recv_mads_list); | ||
3459 | if (cqe_ctx_id != -ENOENT) { | ||
3460 | - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3461 | + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3462 | free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id), | ||
3463 | backend_dev->rdma_dev_res); | ||
3464 | } | ||
3465 | @@ -XXX,XX +XXX,XX @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq) | ||
3466 | } | ||
3467 | total_ne += ne; | ||
3468 | } while (ne > 0); | ||
3469 | - atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne); | ||
3470 | + qatomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne); | ||
3471 | } | ||
3472 | |||
3473 | if (ne < 0) { | ||
3474 | @@ -XXX,XX +XXX,XX @@ static void *comp_handler_thread(void *arg) | ||
3475 | |||
3476 | static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev) | ||
3477 | { | ||
3478 | - atomic_set(&backend_dev->rdmacm_mux.can_receive, 0); | ||
3479 | + qatomic_set(&backend_dev->rdmacm_mux.can_receive, 0); | ||
3480 | } | ||
3481 | |||
3482 | static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev) | ||
3483 | { | ||
3484 | - atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg)); | ||
3485 | + qatomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg)); | ||
3486 | } | ||
3487 | |||
3488 | static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev) | ||
3489 | { | ||
3490 | - return atomic_read(&backend_dev->rdmacm_mux.can_receive); | ||
3491 | + return qatomic_read(&backend_dev->rdmacm_mux.can_receive); | ||
3492 | } | ||
3493 | |||
3494 | static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be) | ||
3495 | @@ -XXX,XX +XXX,XX @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev, | ||
3496 | goto err_dealloc_cqe_ctx; | ||
3497 | } | ||
3498 | |||
3499 | - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3500 | + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3501 | backend_dev->rdma_dev_res->stats.tx++; | ||
3502 | |||
3503 | return; | ||
3504 | @@ -XXX,XX +XXX,XX @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev, | ||
3505 | goto err_dealloc_cqe_ctx; | ||
3506 | } | ||
3507 | |||
3508 | - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3509 | + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3510 | backend_dev->rdma_dev_res->stats.rx_bufs++; | ||
3511 | |||
3512 | return; | ||
3513 | @@ -XXX,XX +XXX,XX @@ void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev, | ||
3514 | goto err_dealloc_cqe_ctx; | ||
3515 | } | ||
3516 | |||
3517 | - atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3518 | + qatomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe); | ||
3519 | backend_dev->rdma_dev_res->stats.rx_bufs++; | ||
3520 | backend_dev->rdma_dev_res->stats.rx_srq++; | ||
3521 | |||
3522 | diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c | ||
3523 | index XXXXXXX..XXXXXXX 100644 | ||
3524 | --- a/hw/rdma/rdma_rm.c | ||
3525 | +++ b/hw/rdma/rdma_rm.c | ||
3526 | @@ -XXX,XX +XXX,XX @@ int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr) | ||
3527 | qemu_mutex_init(&dev_res->lock); | ||
3528 | |||
3529 | memset(&dev_res->stats, 0, sizeof(dev_res->stats)); | ||
3530 | - atomic_set(&dev_res->stats.missing_cqe, 0); | ||
3531 | + qatomic_set(&dev_res->stats.missing_cqe, 0); | ||
3532 | |||
3533 | return 0; | ||
3534 | } | ||
3535 | diff --git a/hw/rdma/vmw/pvrdma_dev_ring.c b/hw/rdma/vmw/pvrdma_dev_ring.c | ||
3536 | index XXXXXXX..XXXXXXX 100644 | ||
3537 | --- a/hw/rdma/vmw/pvrdma_dev_ring.c | ||
3538 | +++ b/hw/rdma/vmw/pvrdma_dev_ring.c | ||
3539 | @@ -XXX,XX +XXX,XX @@ int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev, | ||
3540 | ring->max_elems = max_elems; | ||
3541 | ring->elem_sz = elem_sz; | ||
3542 | /* TODO: Give a moment to think if we want to redo driver settings | ||
3543 | - atomic_set(&ring->ring_state->prod_tail, 0); | ||
3544 | - atomic_set(&ring->ring_state->cons_head, 0); | ||
3545 | + qatomic_set(&ring->ring_state->prod_tail, 0); | ||
3546 | + qatomic_set(&ring->ring_state->cons_head, 0); | ||
3547 | */ | ||
3548 | ring->npages = npages; | ||
3549 | ring->pages = g_malloc(npages * sizeof(void *)); | ||
3550 | diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c | ||
3551 | index XXXXXXX..XXXXXXX 100644 | ||
3552 | --- a/hw/s390x/s390-pci-bus.c | ||
3553 | +++ b/hw/s390x/s390-pci-bus.c | ||
3554 | @@ -XXX,XX +XXX,XX @@ static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) | ||
3555 | actual = *ind_addr; | ||
3556 | do { | ||
3557 | expected = actual; | ||
3558 | - actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set); | ||
3559 | + actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); | ||
3560 | } while (actual != expected); | ||
3561 | cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); | ||
3562 | |||
3563 | diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c | ||
3564 | index XXXXXXX..XXXXXXX 100644 | ||
3565 | --- a/hw/s390x/virtio-ccw.c | ||
3566 | +++ b/hw/s390x/virtio-ccw.c | ||
3567 | @@ -XXX,XX +XXX,XX @@ static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc, | ||
3568 | actual = *ind_addr; | ||
3569 | do { | ||
3570 | expected = actual; | ||
3571 | - actual = atomic_cmpxchg(ind_addr, expected, expected | to_be_set); | ||
3572 | + actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set); | ||
3573 | } while (actual != expected); | ||
3574 | trace_virtio_ccw_set_ind(ind_loc, actual, actual | to_be_set); | ||
3575 | cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); | ||
3576 | diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c | ||
3577 | index XXXXXXX..XXXXXXX 100644 | ||
3578 | --- a/hw/virtio/vhost.c | ||
3579 | +++ b/hw/virtio/vhost.c | ||
3580 | @@ -XXX,XX +XXX,XX @@ static void vhost_dev_sync_region(struct vhost_dev *dev, | ||
3581 | } | ||
3582 | /* Data must be read atomically. We don't really need barrier semantics | ||
3583 | * but it's easier to use atomic_* than roll our own. */ | ||
3584 | - log = atomic_xchg(from, 0); | ||
3585 | + log = qatomic_xchg(from, 0); | ||
3586 | while (log) { | ||
3587 | int bit = ctzl(log); | ||
3588 | hwaddr page_addr; | ||
3589 | diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c | ||
3590 | index XXXXXXX..XXXXXXX 100644 | ||
3591 | --- a/hw/virtio/virtio-mmio.c | ||
3592 | +++ b/hw/virtio/virtio-mmio.c | ||
3593 | @@ -XXX,XX +XXX,XX @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size) | ||
3594 | } | ||
3595 | return proxy->vqs[vdev->queue_sel].enabled; | ||
3596 | case VIRTIO_MMIO_INTERRUPT_STATUS: | ||
3597 | - return atomic_read(&vdev->isr); | ||
3598 | + return qatomic_read(&vdev->isr); | ||
3599 | case VIRTIO_MMIO_STATUS: | ||
3600 | return vdev->status; | ||
3601 | case VIRTIO_MMIO_CONFIG_GENERATION: | ||
3602 | @@ -XXX,XX +XXX,XX @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, | ||
3603 | } | ||
3604 | break; | ||
3605 | case VIRTIO_MMIO_INTERRUPT_ACK: | ||
3606 | - atomic_and(&vdev->isr, ~value); | ||
3607 | + qatomic_and(&vdev->isr, ~value); | ||
3608 | virtio_update_irq(vdev); | ||
3609 | break; | ||
3610 | case VIRTIO_MMIO_STATUS: | ||
3611 | @@ -XXX,XX +XXX,XX @@ static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector) | ||
3612 | if (!vdev) { | ||
3613 | return; | ||
3614 | } | ||
3615 | - level = (atomic_read(&vdev->isr) != 0); | ||
3616 | + level = (qatomic_read(&vdev->isr) != 0); | ||
3617 | trace_virtio_mmio_setting_irq(level); | ||
3618 | qemu_set_irq(proxy->irq, level); | ||
3619 | } | ||
3620 | diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c | ||
3621 | index XXXXXXX..XXXXXXX 100644 | ||
3622 | --- a/hw/virtio/virtio-pci.c | ||
3623 | +++ b/hw/virtio/virtio-pci.c | ||
3624 | @@ -XXX,XX +XXX,XX @@ static void virtio_pci_notify(DeviceState *d, uint16_t vector) | ||
3625 | msix_notify(&proxy->pci_dev, vector); | ||
3626 | else { | ||
3627 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | ||
3628 | - pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1); | ||
3629 | + pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); | ||
3630 | } | ||
3631 | } | ||
3632 | |||
3633 | @@ -XXX,XX +XXX,XX @@ static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) | ||
3634 | break; | ||
3635 | case VIRTIO_PCI_ISR: | ||
3636 | /* reading from the ISR also clears it. */ | ||
3637 | - ret = atomic_xchg(&vdev->isr, 0); | ||
3638 | + ret = qatomic_xchg(&vdev->isr, 0); | ||
3639 | pci_irq_deassert(&proxy->pci_dev); | ||
3640 | break; | ||
3641 | case VIRTIO_MSI_CONFIG_VECTOR: | ||
3642 | @@ -XXX,XX +XXX,XX @@ static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr, | ||
3643 | { | ||
3644 | VirtIOPCIProxy *proxy = opaque; | ||
3645 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | ||
3646 | - uint64_t val = atomic_xchg(&vdev->isr, 0); | ||
3647 | + uint64_t val = qatomic_xchg(&vdev->isr, 0); | ||
3648 | pci_irq_deassert(&proxy->pci_dev); | ||
3649 | |||
3650 | return val; | ||
3651 | diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c | ||
3652 | index XXXXXXX..XXXXXXX 100644 | ||
3653 | --- a/hw/virtio/virtio.c | ||
3654 | +++ b/hw/virtio/virtio.c | ||
3655 | @@ -XXX,XX +XXX,XX @@ static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq) | ||
3656 | { | ||
3657 | VRingMemoryRegionCaches *caches; | ||
3658 | |||
3659 | - caches = atomic_read(&vq->vring.caches); | ||
3660 | - atomic_rcu_set(&vq->vring.caches, NULL); | ||
3661 | + caches = qatomic_read(&vq->vring.caches); | ||
3662 | + qatomic_rcu_set(&vq->vring.caches, NULL); | ||
3663 | if (caches) { | ||
3664 | call_rcu(caches, virtio_free_region_cache, rcu); | ||
3665 | } | ||
3666 | @@ -XXX,XX +XXX,XX @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n) | ||
3667 | goto err_avail; | ||
3668 | } | ||
3669 | |||
3670 | - atomic_rcu_set(&vq->vring.caches, new); | ||
3671 | + qatomic_rcu_set(&vq->vring.caches, new); | ||
3672 | if (old) { | ||
3673 | call_rcu(old, virtio_free_region_cache, rcu); | ||
3674 | } | ||
3675 | @@ -XXX,XX +XXX,XX @@ static void vring_packed_flags_write(VirtIODevice *vdev, | ||
3676 | /* Called within rcu_read_lock(). */ | ||
3677 | static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq) | ||
3678 | { | ||
3679 | - return atomic_rcu_read(&vq->vring.caches); | ||
3680 | + return qatomic_rcu_read(&vq->vring.caches); | ||
3681 | } | ||
3682 | |||
3683 | /* Called within rcu_read_lock(). */ | ||
3684 | @@ -XXX,XX +XXX,XX @@ void virtio_reset(void *opaque) | ||
3685 | vdev->queue_sel = 0; | ||
3686 | vdev->status = 0; | ||
3687 | vdev->disabled = false; | ||
3688 | - atomic_set(&vdev->isr, 0); | ||
3689 | + qatomic_set(&vdev->isr, 0); | ||
3690 | vdev->config_vector = VIRTIO_NO_VECTOR; | ||
3691 | virtio_notify_vector(vdev, vdev->config_vector); | ||
3692 | |||
3693 | @@ -XXX,XX +XXX,XX @@ void virtio_del_queue(VirtIODevice *vdev, int n) | ||
3694 | |||
3695 | static void virtio_set_isr(VirtIODevice *vdev, int value) | ||
3696 | { | ||
3697 | - uint8_t old = atomic_read(&vdev->isr); | ||
3698 | + uint8_t old = qatomic_read(&vdev->isr); | ||
3699 | |||
3700 | /* Do not write ISR if it does not change, so that its cacheline remains | ||
3701 | * shared in the common case where the guest does not read it. | ||
3702 | */ | ||
3703 | if ((old & value) != value) { | ||
3704 | - atomic_or(&vdev->isr, value); | ||
3705 | + qatomic_or(&vdev->isr, value); | ||
3706 | } | ||
3707 | } | ||
3708 | |||
3709 | @@ -XXX,XX +XXX,XX @@ void virtio_init(VirtIODevice *vdev, const char *name, | ||
3710 | vdev->started = false; | ||
3711 | vdev->device_id = device_id; | ||
3712 | vdev->status = 0; | ||
3713 | - atomic_set(&vdev->isr, 0); | ||
3714 | + qatomic_set(&vdev->isr, 0); | ||
3715 | vdev->queue_sel = 0; | ||
3716 | vdev->config_vector = VIRTIO_NO_VECTOR; | ||
3717 | vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX); | ||
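
[Editor's note] The vring.caches hunks above show the qatomic_rcu_set()/
qatomic_rcu_read() pairing that also appears in the exec.c and migration/rdma.c
changes: the writer fully initialises a new structure, publishes the pointer, and
hands the old one to call_rcu(); readers dereference whatever pointer they observe.
The sketch below approximates this with C11 release/acquire atomics (acquire is a
conservative stand-in for the dependency ordering the real macros rely on) and
omits the call_rcu() reclamation step entirely, so it is illustrative only; the
struct and function names are made up.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct caches {
        int generation;
        /* ... cached region pointers would live here ... */
    };

    static struct caches *_Atomic current_caches;

    /* Writer: initialise the new object completely, then publish the pointer.
     * The release store plays the role of qatomic_rcu_set(). */
    static void publish_caches(int generation)
    {
        struct caches *new = malloc(sizeof(*new));

        new->generation = generation;
        atomic_store_explicit(&current_caches, new, memory_order_release);
        /* The real code passes the *old* pointer to call_rcu() so readers that
         * still hold it can finish; that reclamation step is omitted here. */
    }

    /* Reader: load the pointer, then dereference it. */
    static int read_generation(void)
    {
        struct caches *c =
            atomic_load_explicit(&current_caches, memory_order_acquire);

        return c ? c->generation : -1;
    }

    int main(void)
    {
        publish_caches(1);
        printf("generation %d\n", read_generation());
        return 0;
    }
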
3718 | diff --git a/hw/xtensa/pic_cpu.c b/hw/xtensa/pic_cpu.c | ||
3719 | index XXXXXXX..XXXXXXX 100644 | ||
3720 | --- a/hw/xtensa/pic_cpu.c | ||
3721 | +++ b/hw/xtensa/pic_cpu.c | ||
3722 | @@ -XXX,XX +XXX,XX @@ static void xtensa_set_irq(void *opaque, int irq, int active) | ||
3723 | uint32_t irq_bit = 1 << irq; | ||
3724 | |||
3725 | if (active) { | ||
3726 | - atomic_or(&env->sregs[INTSET], irq_bit); | ||
3727 | + qatomic_or(&env->sregs[INTSET], irq_bit); | ||
3728 | } else if (env->config->interrupt[irq].inttype == INTTYPE_LEVEL) { | ||
3729 | - atomic_and(&env->sregs[INTSET], ~irq_bit); | ||
3730 | + qatomic_and(&env->sregs[INTSET], ~irq_bit); | ||
3731 | } | ||
3732 | |||
3733 | check_interrupts(env); | ||
3734 | diff --git a/iothread.c b/iothread.c | ||
3735 | index XXXXXXX..XXXXXXX 100644 | ||
3736 | --- a/iothread.c | ||
3737 | +++ b/iothread.c | ||
3738 | @@ -XXX,XX +XXX,XX @@ static void *iothread_run(void *opaque) | ||
3739 | * We must check the running state again in case it was | ||
3740 | * changed in previous aio_poll() | ||
3741 | */ | ||
3742 | - if (iothread->running && atomic_read(&iothread->run_gcontext)) { | ||
3743 | + if (iothread->running && qatomic_read(&iothread->run_gcontext)) { | ||
3744 | g_main_loop_run(iothread->main_loop); | ||
3745 | } | ||
3746 | } | ||
3747 | @@ -XXX,XX +XXX,XX @@ static void iothread_instance_init(Object *obj) | ||
3748 | iothread->thread_id = -1; | ||
3749 | qemu_sem_init(&iothread->init_done_sem, 0); | ||
3750 | /* By default, we don't run gcontext */ | ||
3751 | - atomic_set(&iothread->run_gcontext, 0); | ||
3752 | + qatomic_set(&iothread->run_gcontext, 0); | ||
3753 | } | ||
3754 | |||
3755 | static void iothread_instance_finalize(Object *obj) | ||
3756 | @@ -XXX,XX +XXX,XX @@ IOThreadInfoList *qmp_query_iothreads(Error **errp) | ||
3757 | |||
3758 | GMainContext *iothread_get_g_main_context(IOThread *iothread) | ||
3759 | { | ||
3760 | - atomic_set(&iothread->run_gcontext, 1); | ||
3761 | + qatomic_set(&iothread->run_gcontext, 1); | ||
3762 | aio_notify(iothread->ctx); | ||
3763 | return iothread->worker_context; | ||
3764 | } | ||
3765 | diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c | ||
3766 | index XXXXXXX..XXXXXXX 100644 | ||
3767 | --- a/linux-user/hppa/cpu_loop.c | ||
3768 | +++ b/linux-user/hppa/cpu_loop.c | ||
3769 | @@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env) | ||
3770 | } | ||
3771 | old = tswap32(old); | ||
3772 | new = tswap32(new); | ||
3773 | - ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new); | ||
3774 | + ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new); | ||
3775 | ret = tswap32(ret); | ||
3776 | break; | ||
3777 | |||
3778 | @@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env) | ||
3779 | case 0: | ||
3780 | old = *(uint8_t *)g2h(old); | ||
3781 | new = *(uint8_t *)g2h(new); | ||
3782 | - ret = atomic_cmpxchg((uint8_t *)g2h(addr), old, new); | ||
3783 | + ret = qatomic_cmpxchg((uint8_t *)g2h(addr), old, new); | ||
3784 | ret = ret != old; | ||
3785 | break; | ||
3786 | case 1: | ||
3787 | old = *(uint16_t *)g2h(old); | ||
3788 | new = *(uint16_t *)g2h(new); | ||
3789 | - ret = atomic_cmpxchg((uint16_t *)g2h(addr), old, new); | ||
3790 | + ret = qatomic_cmpxchg((uint16_t *)g2h(addr), old, new); | ||
3791 | ret = ret != old; | ||
3792 | break; | ||
3793 | case 2: | ||
3794 | old = *(uint32_t *)g2h(old); | ||
3795 | new = *(uint32_t *)g2h(new); | ||
3796 | - ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new); | ||
3797 | + ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new); | ||
3798 | ret = ret != old; | ||
3799 | break; | ||
3800 | case 3: | ||
3801 | @@ -XXX,XX +XXX,XX @@ static abi_ulong hppa_lws(CPUHPPAState *env) | ||
3802 | o64 = *(uint64_t *)g2h(old); | ||
3803 | n64 = *(uint64_t *)g2h(new); | ||
3804 | #ifdef CONFIG_ATOMIC64 | ||
3805 | - r64 = atomic_cmpxchg__nocheck((uint64_t *)g2h(addr), o64, n64); | ||
3806 | + r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(addr), | ||
3807 | + o64, n64); | ||
3808 | ret = r64 != o64; | ||
3809 | #else | ||
3810 | start_exclusive(); | ||
3811 | diff --git a/linux-user/signal.c b/linux-user/signal.c | ||
3812 | index XXXXXXX..XXXXXXX 100644 | ||
3813 | --- a/linux-user/signal.c | ||
3814 | +++ b/linux-user/signal.c | ||
3815 | @@ -XXX,XX +XXX,XX @@ int block_signals(void) | ||
3816 | sigfillset(&set); | ||
3817 | sigprocmask(SIG_SETMASK, &set, 0); | ||
3818 | |||
3819 | - return atomic_xchg(&ts->signal_pending, 1); | ||
3820 | + return qatomic_xchg(&ts->signal_pending, 1); | ||
3821 | } | ||
3822 | |||
3823 | /* Wrapper for sigprocmask function | ||
3824 | @@ -XXX,XX +XXX,XX @@ int queue_signal(CPUArchState *env, int sig, int si_type, | ||
3825 | ts->sync_signal.info = *info; | ||
3826 | ts->sync_signal.pending = sig; | ||
3827 | /* signal that a new signal is pending */ | ||
3828 | - atomic_set(&ts->signal_pending, 1); | ||
3829 | + qatomic_set(&ts->signal_pending, 1); | ||
3830 | return 1; /* indicates that the signal was queued */ | ||
3831 | } | ||
3832 | |||
3833 | @@ -XXX,XX +XXX,XX @@ void process_pending_signals(CPUArchState *cpu_env) | ||
3834 | sigset_t set; | ||
3835 | sigset_t *blocked_set; | ||
3836 | |||
3837 | - while (atomic_read(&ts->signal_pending)) { | ||
3838 | + while (qatomic_read(&ts->signal_pending)) { | ||
3839 | /* FIXME: This is not threadsafe. */ | ||
3840 | sigfillset(&set); | ||
3841 | sigprocmask(SIG_SETMASK, &set, 0); | ||
3842 | @@ -XXX,XX +XXX,XX @@ void process_pending_signals(CPUArchState *cpu_env) | ||
3843 | * of unblocking might cause us to take another host signal which | ||
3844 | * will set signal_pending again). | ||
3845 | */ | ||
3846 | - atomic_set(&ts->signal_pending, 0); | ||
3847 | + qatomic_set(&ts->signal_pending, 0); | ||
3848 | ts->in_sigsuspend = 0; | ||
3849 | set = ts->signal_mask; | ||
3850 | sigdelset(&set, SIGSEGV); | ||
3851 | diff --git a/migration/colo-failover.c b/migration/colo-failover.c | ||
3852 | index XXXXXXX..XXXXXXX 100644 | ||
3853 | --- a/migration/colo-failover.c | ||
3854 | +++ b/migration/colo-failover.c | ||
3855 | @@ -XXX,XX +XXX,XX @@ FailoverStatus failover_set_state(FailoverStatus old_state, | ||
3856 | { | ||
3857 | FailoverStatus old; | ||
3858 | |||
3859 | - old = atomic_cmpxchg(&failover_state, old_state, new_state); | ||
3860 | + old = qatomic_cmpxchg(&failover_state, old_state, new_state); | ||
3861 | if (old == old_state) { | ||
3862 | trace_colo_failover_set_state(FailoverStatus_str(new_state)); | ||
3863 | } | ||
3864 | @@ -XXX,XX +XXX,XX @@ FailoverStatus failover_set_state(FailoverStatus old_state, | ||
3865 | |||
3866 | FailoverStatus failover_get_state(void) | ||
3867 | { | ||
3868 | - return atomic_read(&failover_state); | ||
3869 | + return qatomic_read(&failover_state); | ||
3870 | } | ||
3871 | |||
3872 | void qmp_x_colo_lost_heartbeat(Error **errp) | ||
3873 | diff --git a/migration/migration.c b/migration/migration.c | ||
3874 | index XXXXXXX..XXXXXXX 100644 | ||
3875 | --- a/migration/migration.c | ||
3876 | +++ b/migration/migration.c | ||
3877 | @@ -XXX,XX +XXX,XX @@ void qmp_migrate_start_postcopy(Error **errp) | ||
3878 | * we don't error if migration has finished since that would be racy | ||
3879 | * with issuing this command. | ||
3880 | */ | ||
3881 | - atomic_set(&s->start_postcopy, true); | ||
3882 | + qatomic_set(&s->start_postcopy, true); | ||
3883 | } | ||
3884 | |||
3885 | /* shared migration helpers */ | ||
3886 | @@ -XXX,XX +XXX,XX @@ void qmp_migrate_start_postcopy(Error **errp) | ||
3887 | void migrate_set_state(int *state, int old_state, int new_state) | ||
3888 | { | ||
3889 | assert(new_state < MIGRATION_STATUS__MAX); | ||
3890 | - if (atomic_cmpxchg(state, old_state, new_state) == old_state) { | ||
3891 | + if (qatomic_cmpxchg(state, old_state, new_state) == old_state) { | ||
3892 | trace_migrate_set_state(MigrationStatus_str(new_state)); | ||
3893 | migrate_generate_event(new_state); | ||
3894 | } | ||
3895 | @@ -XXX,XX +XXX,XX @@ void qmp_migrate_recover(const char *uri, Error **errp) | ||
3896 | return; | ||
3897 | } | ||
3898 | |||
3899 | - if (atomic_cmpxchg(&mis->postcopy_recover_triggered, | ||
3900 | + if (qatomic_cmpxchg(&mis->postcopy_recover_triggered, | ||
3901 | false, true) == true) { | ||
3902 | error_setg(errp, "Migrate recovery is triggered already"); | ||
3903 | return; | ||
3904 | @@ -XXX,XX +XXX,XX @@ static MigIterateState migration_iteration_run(MigrationState *s) | ||
3905 | if (pending_size && pending_size >= s->threshold_size) { | ||
3906 | /* Still a significant amount to transfer */ | ||
3907 | if (!in_postcopy && pend_pre <= s->threshold_size && | ||
3908 | - atomic_read(&s->start_postcopy)) { | ||
3909 | + qatomic_read(&s->start_postcopy)) { | ||
3910 | if (postcopy_start(s)) { | ||
3911 | error_report("%s: postcopy failed to start", __func__); | ||
3912 | } | ||
3913 | diff --git a/migration/multifd.c b/migration/multifd.c | ||
3914 | index XXXXXXX..XXXXXXX 100644 | ||
3915 | --- a/migration/multifd.c | ||
3916 | +++ b/migration/multifd.c | ||
3917 | @@ -XXX,XX +XXX,XX @@ static int multifd_send_pages(QEMUFile *f) | ||
3918 | MultiFDPages_t *pages = multifd_send_state->pages; | ||
3919 | uint64_t transferred; | ||
3920 | |||
3921 | - if (atomic_read(&multifd_send_state->exiting)) { | ||
3922 | + if (qatomic_read(&multifd_send_state->exiting)) { | ||
3923 | return -1; | ||
3924 | } | ||
3925 | |||
3926 | @@ -XXX,XX +XXX,XX @@ static void multifd_send_terminate_threads(Error *err) | ||
3927 | * threads at the same time, we can end up calling this function | ||
3928 | * twice. | ||
3929 | */ | ||
3930 | - if (atomic_xchg(&multifd_send_state->exiting, 1)) { | ||
3931 | + if (qatomic_xchg(&multifd_send_state->exiting, 1)) { | ||
3932 | return; | ||
3933 | } | ||
3934 | |||
3935 | @@ -XXX,XX +XXX,XX @@ static void *multifd_send_thread(void *opaque) | ||
3936 | while (true) { | ||
3937 | qemu_sem_wait(&p->sem); | ||
3938 | |||
3939 | - if (atomic_read(&multifd_send_state->exiting)) { | ||
3940 | + if (qatomic_read(&multifd_send_state->exiting)) { | ||
3941 | break; | ||
3942 | } | ||
3943 | qemu_mutex_lock(&p->mutex); | ||
3944 | @@ -XXX,XX +XXX,XX @@ int multifd_save_setup(Error **errp) | ||
3945 | multifd_send_state->params = g_new0(MultiFDSendParams, thread_count); | ||
3946 | multifd_send_state->pages = multifd_pages_init(page_count); | ||
3947 | qemu_sem_init(&multifd_send_state->channels_ready, 0); | ||
3948 | - atomic_set(&multifd_send_state->exiting, 0); | ||
3949 | + qatomic_set(&multifd_send_state->exiting, 0); | ||
3950 | multifd_send_state->ops = multifd_ops[migrate_multifd_compression()]; | ||
3951 | |||
3952 | for (i = 0; i < thread_count; i++) { | ||
3953 | @@ -XXX,XX +XXX,XX @@ int multifd_load_setup(Error **errp) | ||
3954 | thread_count = migrate_multifd_channels(); | ||
3955 | multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state)); | ||
3956 | multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count); | ||
3957 | - atomic_set(&multifd_recv_state->count, 0); | ||
3958 | + qatomic_set(&multifd_recv_state->count, 0); | ||
3959 | qemu_sem_init(&multifd_recv_state->sem_sync, 0); | ||
3960 | multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()]; | ||
3961 | |||
3962 | @@ -XXX,XX +XXX,XX @@ bool multifd_recv_all_channels_created(void) | ||
3963 | return true; | ||
3964 | } | ||
3965 | |||
3966 | - return thread_count == atomic_read(&multifd_recv_state->count); | ||
3967 | + return thread_count == qatomic_read(&multifd_recv_state->count); | ||
3968 | } | ||
3969 | |||
3970 | /* | ||
3971 | @@ -XXX,XX +XXX,XX @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp) | ||
3972 | error_propagate_prepend(errp, local_err, | ||
3973 | "failed to receive packet" | ||
3974 | " via multifd channel %d: ", | ||
3975 | - atomic_read(&multifd_recv_state->count)); | ||
3976 | + qatomic_read(&multifd_recv_state->count)); | ||
3977 | return false; | ||
3978 | } | ||
3979 | trace_multifd_recv_new_channel(id); | ||
3980 | @@ -XXX,XX +XXX,XX @@ bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp) | ||
3981 | p->running = true; | ||
3982 | qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p, | ||
3983 | QEMU_THREAD_JOINABLE); | ||
3984 | - atomic_inc(&multifd_recv_state->count); | ||
3985 | - return atomic_read(&multifd_recv_state->count) == | ||
3986 | + qatomic_inc(&multifd_recv_state->count); | ||
3987 | + return qatomic_read(&multifd_recv_state->count) == | ||
3988 | migrate_multifd_channels(); | ||
3989 | } | ||
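
[Editor's note] multifd_send_terminate_threads() above guards itself with
qatomic_xchg() precisely because, as its comment notes, it can end up being called
twice: the exchange returns the previous value of the flag, so only the first
caller proceeds with the teardown. A tiny standalone version of that run-once
guard (C11 atomics, hypothetical terminate_threads() name):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int exiting;

    static void terminate_threads(void)
    {
        /* atomic_exchange returns the previous value: only the first caller
         * sees 0 and performs the teardown, later callers bail out. */
        if (atomic_exchange(&exiting, 1)) {
            return;
        }
        puts("tearing down channels");  /* runs exactly once */
    }

    int main(void)
    {
        terminate_threads();
        terminate_threads();            /* second call is a no-op */
        return 0;
    }
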
3990 | diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c | ||
3991 | index XXXXXXX..XXXXXXX 100644 | ||
3992 | --- a/migration/postcopy-ram.c | ||
3993 | +++ b/migration/postcopy-ram.c | ||
3994 | @@ -XXX,XX +XXX,XX @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) | ||
3995 | Error *local_err = NULL; | ||
3996 | |||
3997 | /* Let the fault thread quit */ | ||
3998 | - atomic_set(&mis->fault_thread_quit, 1); | ||
3999 | + qatomic_set(&mis->fault_thread_quit, 1); | ||
4000 | postcopy_fault_thread_notify(mis); | ||
4001 | trace_postcopy_ram_incoming_cleanup_join(); | ||
4002 | qemu_thread_join(&mis->fault_thread); | ||
4003 | @@ -XXX,XX +XXX,XX @@ static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, | ||
4004 | |||
4005 | low_time_offset = get_low_time_offset(dc); | ||
4006 | if (dc->vcpu_addr[cpu] == 0) { | ||
4007 | - atomic_inc(&dc->smp_cpus_down); | ||
4008 | + qatomic_inc(&dc->smp_cpus_down); | ||
4009 | } | ||
4010 | |||
4011 | - atomic_xchg(&dc->last_begin, low_time_offset); | ||
4012 | - atomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset); | ||
4013 | - atomic_xchg(&dc->vcpu_addr[cpu], addr); | ||
4014 | + qatomic_xchg(&dc->last_begin, low_time_offset); | ||
4015 | + qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset); | ||
4016 | + qatomic_xchg(&dc->vcpu_addr[cpu], addr); | ||
4017 | |||
4018 | /* | ||
4019 | * check it here, not at the beginning of the function, | ||
4020 | @@ -XXX,XX +XXX,XX @@ static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid, | ||
4021 | */ | ||
4022 | already_received = ramblock_recv_bitmap_test(rb, (void *)addr); | ||
4023 | if (already_received) { | ||
4024 | - atomic_xchg(&dc->vcpu_addr[cpu], 0); | ||
4025 | - atomic_xchg(&dc->page_fault_vcpu_time[cpu], 0); | ||
4026 | - atomic_dec(&dc->smp_cpus_down); | ||
4027 | + qatomic_xchg(&dc->vcpu_addr[cpu], 0); | ||
4028 | + qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0); | ||
4029 | + qatomic_dec(&dc->smp_cpus_down); | ||
4030 | } | ||
4031 | trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu], | ||
4032 | cpu, already_received); | ||
4033 | @@ -XXX,XX +XXX,XX @@ static void mark_postcopy_blocktime_end(uintptr_t addr) | ||
4034 | for (i = 0; i < smp_cpus; i++) { | ||
4035 | uint32_t vcpu_blocktime = 0; | ||
4036 | |||
4037 | - read_vcpu_time = atomic_fetch_add(&dc->page_fault_vcpu_time[i], 0); | ||
4038 | - if (atomic_fetch_add(&dc->vcpu_addr[i], 0) != addr || | ||
4039 | + read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0); | ||
4040 | + if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr || | ||
4041 | read_vcpu_time == 0) { | ||
4042 | continue; | ||
4043 | } | ||
4044 | - atomic_xchg(&dc->vcpu_addr[i], 0); | ||
4045 | + qatomic_xchg(&dc->vcpu_addr[i], 0); | ||
4046 | vcpu_blocktime = low_time_offset - read_vcpu_time; | ||
4047 | affected_cpu += 1; | ||
4048 | /* we need to know whether mark_postcopy_end was due to a | ||
4049 | * faulted page; another possible case is a prefetched | ||
4050 | * page, and in that case we shouldn't be here */ | ||
4051 | if (!vcpu_total_blocktime && | ||
4052 | - atomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) { | ||
4053 | + qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) { | ||
4054 | vcpu_total_blocktime = true; | ||
4055 | } | ||
4056 | /* continue cycle, due to one page could affect several vCPUs */ | ||
4057 | dc->vcpu_blocktime[i] += vcpu_blocktime; | ||
4058 | } | ||
4059 | |||
4060 | - atomic_sub(&dc->smp_cpus_down, affected_cpu); | ||
4061 | + qatomic_sub(&dc->smp_cpus_down, affected_cpu); | ||
4062 | if (vcpu_total_blocktime) { | ||
4063 | - dc->total_blocktime += low_time_offset - atomic_fetch_add( | ||
4064 | + dc->total_blocktime += low_time_offset - qatomic_fetch_add( | ||
4065 | &dc->last_begin, 0); | ||
4066 | } | ||
4067 | trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime, | ||
4068 | @@ -XXX,XX +XXX,XX @@ static void *postcopy_ram_fault_thread(void *opaque) | ||
4069 | error_report("%s: read() failed", __func__); | ||
4070 | } | ||
4071 | |||
4072 | - if (atomic_read(&mis->fault_thread_quit)) { | ||
4073 | + if (qatomic_read(&mis->fault_thread_quit)) { | ||
4074 | trace_postcopy_ram_fault_thread_quit(); | ||
4075 | break; | ||
4076 | } | ||
4077 | @@ -XXX,XX +XXX,XX @@ static PostcopyState incoming_postcopy_state; | ||
4078 | |||
4079 | PostcopyState postcopy_state_get(void) | ||
4080 | { | ||
4081 | - return atomic_mb_read(&incoming_postcopy_state); | ||
4082 | + return qatomic_mb_read(&incoming_postcopy_state); | ||
4083 | } | ||
4084 | |||
4085 | /* Set the state and return the old state */ | ||
4086 | PostcopyState postcopy_state_set(PostcopyState new_state) | ||
4087 | { | ||
4088 | - return atomic_xchg(&incoming_postcopy_state, new_state); | ||
4089 | + return qatomic_xchg(&incoming_postcopy_state, new_state); | ||
4090 | } | ||
4091 | |||
4092 | /* Register a handler for external shared memory postcopy | ||
4093 | diff --git a/migration/rdma.c b/migration/rdma.c | ||
4094 | index XXXXXXX..XXXXXXX 100644 | ||
4095 | --- a/migration/rdma.c | ||
4096 | +++ b/migration/rdma.c | ||
4097 | @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc, | ||
4098 | size_t len = 0; | ||
4099 | |||
4100 | RCU_READ_LOCK_GUARD(); | ||
4101 | - rdma = atomic_rcu_read(&rioc->rdmaout); | ||
4102 | + rdma = qatomic_rcu_read(&rioc->rdmaout); | ||
4103 | |||
4104 | if (!rdma) { | ||
4105 | return -EIO; | ||
4106 | @@ -XXX,XX +XXX,XX @@ static ssize_t qio_channel_rdma_readv(QIOChannel *ioc, | ||
4107 | size_t done = 0; | ||
4108 | |||
4109 | RCU_READ_LOCK_GUARD(); | ||
4110 | - rdma = atomic_rcu_read(&rioc->rdmain); | ||
4111 | + rdma = qatomic_rcu_read(&rioc->rdmain); | ||
4112 | |||
4113 | if (!rdma) { | ||
4114 | return -EIO; | ||
4115 | @@ -XXX,XX +XXX,XX @@ qio_channel_rdma_source_prepare(GSource *source, | ||
4116 | |||
4117 | RCU_READ_LOCK_GUARD(); | ||
4118 | if (rsource->condition == G_IO_IN) { | ||
4119 | - rdma = atomic_rcu_read(&rsource->rioc->rdmain); | ||
4120 | + rdma = qatomic_rcu_read(&rsource->rioc->rdmain); | ||
4121 | } else { | ||
4122 | - rdma = atomic_rcu_read(&rsource->rioc->rdmaout); | ||
4123 | + rdma = qatomic_rcu_read(&rsource->rioc->rdmaout); | ||
4124 | } | ||
4125 | |||
4126 | if (!rdma) { | ||
4127 | @@ -XXX,XX +XXX,XX @@ qio_channel_rdma_source_check(GSource *source) | ||
4128 | |||
4129 | RCU_READ_LOCK_GUARD(); | ||
4130 | if (rsource->condition == G_IO_IN) { | ||
4131 | - rdma = atomic_rcu_read(&rsource->rioc->rdmain); | ||
4132 | + rdma = qatomic_rcu_read(&rsource->rioc->rdmain); | ||
4133 | } else { | ||
4134 | - rdma = atomic_rcu_read(&rsource->rioc->rdmaout); | ||
4135 | + rdma = qatomic_rcu_read(&rsource->rioc->rdmaout); | ||
4136 | } | ||
4137 | |||
4138 | if (!rdma) { | ||
4139 | @@ -XXX,XX +XXX,XX @@ qio_channel_rdma_source_dispatch(GSource *source, | ||
4140 | |||
4141 | RCU_READ_LOCK_GUARD(); | ||
4142 | if (rsource->condition == G_IO_IN) { | ||
4143 | - rdma = atomic_rcu_read(&rsource->rioc->rdmain); | ||
4144 | + rdma = qatomic_rcu_read(&rsource->rioc->rdmain); | ||
4145 | } else { | ||
4146 | - rdma = atomic_rcu_read(&rsource->rioc->rdmaout); | ||
4147 | + rdma = qatomic_rcu_read(&rsource->rioc->rdmaout); | ||
4148 | } | ||
4149 | |||
4150 | if (!rdma) { | ||
4151 | @@ -XXX,XX +XXX,XX @@ static int qio_channel_rdma_close(QIOChannel *ioc, | ||
4152 | |||
4153 | rdmain = rioc->rdmain; | ||
4154 | if (rdmain) { | ||
4155 | - atomic_rcu_set(&rioc->rdmain, NULL); | ||
4156 | + qatomic_rcu_set(&rioc->rdmain, NULL); | ||
4157 | } | ||
4158 | |||
4159 | rdmaout = rioc->rdmaout; | ||
4160 | if (rdmaout) { | ||
4161 | - atomic_rcu_set(&rioc->rdmaout, NULL); | ||
4162 | + qatomic_rcu_set(&rioc->rdmaout, NULL); | ||
4163 | } | ||
4164 | |||
4165 | rcu->rdmain = rdmain; | ||
4166 | @@ -XXX,XX +XXX,XX @@ qio_channel_rdma_shutdown(QIOChannel *ioc, | ||
4167 | |||
4168 | RCU_READ_LOCK_GUARD(); | ||
4169 | |||
4170 | - rdmain = atomic_rcu_read(&rioc->rdmain); | ||
4171 | - rdmaout = atomic_rcu_read(&rioc->rdmain); | ||
4172 | + rdmain = qatomic_rcu_read(&rioc->rdmain); | ||
4173 | + rdmaout = qatomic_rcu_read(&rioc->rdmain); | ||
4174 | |||
4175 | switch (how) { | ||
4176 | case QIO_CHANNEL_SHUTDOWN_READ: | ||
4177 | @@ -XXX,XX +XXX,XX @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, | ||
4178 | int ret; | ||
4179 | |||
4180 | RCU_READ_LOCK_GUARD(); | ||
4181 | - rdma = atomic_rcu_read(&rioc->rdmaout); | ||
4182 | + rdma = qatomic_rcu_read(&rioc->rdmaout); | ||
4183 | |||
4184 | if (!rdma) { | ||
4185 | return -EIO; | ||
4186 | @@ -XXX,XX +XXX,XX @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque) | ||
4187 | int i = 0; | ||
4188 | |||
4189 | RCU_READ_LOCK_GUARD(); | ||
4190 | - rdma = atomic_rcu_read(&rioc->rdmain); | ||
4191 | + rdma = qatomic_rcu_read(&rioc->rdmain); | ||
4192 | |||
4193 | if (!rdma) { | ||
4194 | return -EIO; | ||
4195 | @@ -XXX,XX +XXX,XX @@ rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name) | ||
4196 | int found = -1; | ||
4197 | |||
4198 | RCU_READ_LOCK_GUARD(); | ||
4199 | - rdma = atomic_rcu_read(&rioc->rdmain); | ||
4200 | + rdma = qatomic_rcu_read(&rioc->rdmain); | ||
4201 | |||
4202 | if (!rdma) { | ||
4203 | return -EIO; | ||
4204 | @@ -XXX,XX +XXX,XX @@ static int qemu_rdma_registration_start(QEMUFile *f, void *opaque, | ||
4205 | RDMAContext *rdma; | ||
4206 | |||
4207 | RCU_READ_LOCK_GUARD(); | ||
4208 | - rdma = atomic_rcu_read(&rioc->rdmaout); | ||
4209 | + rdma = qatomic_rcu_read(&rioc->rdmaout); | ||
4210 | if (!rdma) { | ||
4211 | return -EIO; | ||
4212 | } | ||
4213 | @@ -XXX,XX +XXX,XX @@ static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque, | ||
4214 | int ret = 0; | ||
4215 | |||
4216 | RCU_READ_LOCK_GUARD(); | ||
4217 | - rdma = atomic_rcu_read(&rioc->rdmaout); | ||
4218 | + rdma = qatomic_rcu_read(&rioc->rdmaout); | ||
4219 | if (!rdma) { | ||
4220 | return -EIO; | ||
4221 | } | ||
4222 | diff --git a/monitor/hmp.c b/monitor/hmp.c | ||
4223 | index XXXXXXX..XXXXXXX 100644 | ||
4224 | --- a/monitor/hmp.c | ||
4225 | +++ b/monitor/hmp.c | ||
4226 | @@ -XXX,XX +XXX,XX @@ static void monitor_event(void *opaque, QEMUChrEvent event) | ||
4227 | monitor_resume(mon); | ||
4228 | monitor_flush(mon); | ||
4229 | } else { | ||
4230 | - atomic_mb_set(&mon->suspend_cnt, 0); | ||
4231 | + qatomic_mb_set(&mon->suspend_cnt, 0); | ||
4232 | } | ||
4233 | break; | ||
4234 | |||
4235 | case CHR_EVENT_MUX_OUT: | ||
4236 | if (mon->reset_seen) { | ||
4237 | - if (atomic_mb_read(&mon->suspend_cnt) == 0) { | ||
4238 | + if (qatomic_mb_read(&mon->suspend_cnt) == 0) { | ||
4239 | monitor_printf(mon, "\n"); | ||
4240 | } | ||
4241 | monitor_flush(mon); | ||
4242 | monitor_suspend(mon); | ||
4243 | } else { | ||
4244 | - atomic_inc(&mon->suspend_cnt); | ||
4245 | + qatomic_inc(&mon->suspend_cnt); | ||
4246 | } | ||
4247 | qemu_mutex_lock(&mon->mon_lock); | ||
4248 | mon->mux_out = 1; | ||
4249 | diff --git a/monitor/misc.c b/monitor/misc.c | ||
4250 | index XXXXXXX..XXXXXXX 100644 | ||
4251 | --- a/monitor/misc.c | ||
4252 | +++ b/monitor/misc.c | ||
4253 | @@ -XXX,XX +XXX,XX @@ static uint64_t vtop(void *ptr, Error **errp) | ||
4254 | } | ||
4255 | |||
4256 | /* Force copy-on-write if necessary. */ | ||
4257 | - atomic_add((uint8_t *)ptr, 0); | ||
4258 | + qatomic_add((uint8_t *)ptr, 0); | ||
4259 | |||
4260 | if (pread(fd, &pinfo, sizeof(pinfo), offset) != sizeof(pinfo)) { | ||
4261 | error_setg_errno(errp, errno, "Cannot read pagemap"); | ||
4262 | diff --git a/monitor/monitor.c b/monitor/monitor.c | ||
4263 | index XXXXXXX..XXXXXXX 100644 | ||
4264 | --- a/monitor/monitor.c | ||
4265 | +++ b/monitor/monitor.c | ||
4266 | @@ -XXX,XX +XXX,XX @@ int monitor_suspend(Monitor *mon) | ||
4267 | return -ENOTTY; | ||
4268 | } | ||
4269 | |||
4270 | - atomic_inc(&mon->suspend_cnt); | ||
4271 | + qatomic_inc(&mon->suspend_cnt); | ||
4272 | |||
4273 | if (mon->use_io_thread) { | ||
4274 | /* | ||
4275 | @@ -XXX,XX +XXX,XX @@ void monitor_resume(Monitor *mon) | ||
4276 | return; | ||
4277 | } | ||
4278 | |||
4279 | - if (atomic_dec_fetch(&mon->suspend_cnt) == 0) { | ||
4280 | + if (qatomic_dec_fetch(&mon->suspend_cnt) == 0) { | ||
4281 | AioContext *ctx; | ||
4282 | |||
4283 | if (mon->use_io_thread) { | ||
4284 | @@ -XXX,XX +XXX,XX @@ int monitor_can_read(void *opaque) | ||
4285 | { | ||
4286 | Monitor *mon = opaque; | ||
4287 | |||
4288 | - return !atomic_mb_read(&mon->suspend_cnt); | ||
4289 | + return !qatomic_mb_read(&mon->suspend_cnt); | ||
4290 | } | ||
4291 | |||
4292 | void monitor_list_append(Monitor *mon) | ||
4293 | diff --git a/qemu-nbd.c b/qemu-nbd.c | ||
4294 | index XXXXXXX..XXXXXXX 100644 | ||
4295 | --- a/qemu-nbd.c | ||
4296 | +++ b/qemu-nbd.c | ||
4297 | @@ -XXX,XX +XXX,XX @@ QEMU_COPYRIGHT "\n" | ||
4298 | #if HAVE_NBD_DEVICE | ||
4299 | static void termsig_handler(int signum) | ||
4300 | { | ||
4301 | - atomic_cmpxchg(&state, RUNNING, TERMINATE); | ||
4302 | + qatomic_cmpxchg(&state, RUNNING, TERMINATE); | ||
4303 | qemu_notify_event(); | ||
4304 | } | ||
4305 | #endif /* HAVE_NBD_DEVICE */ | ||
4306 | diff --git a/qga/commands.c b/qga/commands.c | ||
4307 | index XXXXXXX..XXXXXXX 100644 | ||
4308 | --- a/qga/commands.c | ||
4309 | +++ b/qga/commands.c | ||
4310 | @@ -XXX,XX +XXX,XX @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **errp) | ||
4311 | |||
4312 | ges = g_new0(GuestExecStatus, 1); | ||
4313 | |||
4314 | - bool finished = atomic_mb_read(&gei->finished); | ||
4315 | + bool finished = qatomic_mb_read(&gei->finished); | ||
4316 | |||
4317 | /* need to wait till output channels are closed | ||
4318 | * to be sure we captured all output at this point */ | ||
4319 | if (gei->has_output) { | ||
4320 | - finished = finished && atomic_mb_read(&gei->out.closed); | ||
4321 | - finished = finished && atomic_mb_read(&gei->err.closed); | ||
4322 | + finished = finished && qatomic_mb_read(&gei->out.closed); | ||
4323 | + finished = finished && qatomic_mb_read(&gei->err.closed); | ||
4324 | } | ||
4325 | |||
4326 | ges->exited = finished; | ||
4327 | @@ -XXX,XX +XXX,XX @@ static void guest_exec_child_watch(GPid pid, gint status, gpointer data) | ||
4328 | (int32_t)gpid_to_int64(pid), (uint32_t)status); | ||
4329 | |||
4330 | gei->status = status; | ||
4331 | - atomic_mb_set(&gei->finished, true); | ||
4332 | + qatomic_mb_set(&gei->finished, true); | ||
4333 | |||
4334 | g_spawn_close_pid(pid); | ||
4335 | } | ||
4336 | @@ -XXX,XX +XXX,XX @@ static gboolean guest_exec_input_watch(GIOChannel *ch, | ||
4337 | done: | ||
4338 | g_io_channel_shutdown(ch, true, NULL); | ||
4339 | g_io_channel_unref(ch); | ||
4340 | - atomic_mb_set(&p->closed, true); | ||
4341 | + qatomic_mb_set(&p->closed, true); | ||
4342 | g_free(p->data); | ||
4343 | |||
4344 | return false; | ||
4345 | @@ -XXX,XX +XXX,XX @@ static gboolean guest_exec_output_watch(GIOChannel *ch, | ||
4346 | close: | ||
4347 | g_io_channel_shutdown(ch, true, NULL); | ||
4348 | g_io_channel_unref(ch); | ||
4349 | - atomic_mb_set(&p->closed, true); | ||
4350 | + qatomic_mb_set(&p->closed, true); | ||
4351 | return false; | ||
4352 | } | ||
4353 | |||
4354 | diff --git a/qom/object.c b/qom/object.c | ||
4355 | index XXXXXXX..XXXXXXX 100644 | ||
4356 | --- a/qom/object.c | ||
4357 | +++ b/qom/object.c | ||
4358 | @@ -XXX,XX +XXX,XX @@ Object *object_dynamic_cast_assert(Object *obj, const char *typename, | ||
4359 | Object *inst; | ||
4360 | |||
4361 | for (i = 0; obj && i < OBJECT_CLASS_CAST_CACHE; i++) { | ||
4362 | - if (atomic_read(&obj->class->object_cast_cache[i]) == typename) { | ||
4363 | + if (qatomic_read(&obj->class->object_cast_cache[i]) == typename) { | ||
4364 | goto out; | ||
4365 | } | ||
4366 | } | ||
4367 | @@ -XXX,XX +XXX,XX @@ Object *object_dynamic_cast_assert(Object *obj, const char *typename, | ||
4368 | |||
4369 | if (obj && obj == inst) { | ||
4370 | for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) { | ||
4371 | - atomic_set(&obj->class->object_cast_cache[i - 1], | ||
4372 | - atomic_read(&obj->class->object_cast_cache[i])); | ||
4373 | + qatomic_set(&obj->class->object_cast_cache[i - 1], | ||
4374 | + qatomic_read(&obj->class->object_cast_cache[i])); | ||
4375 | } | ||
4376 | - atomic_set(&obj->class->object_cast_cache[i - 1], typename); | ||
4377 | + qatomic_set(&obj->class->object_cast_cache[i - 1], typename); | ||
4378 | } | ||
4379 | |||
4380 | out: | ||
4381 | @@ -XXX,XX +XXX,XX @@ ObjectClass *object_class_dynamic_cast_assert(ObjectClass *class, | ||
4382 | int i; | ||
4383 | |||
4384 | for (i = 0; class && i < OBJECT_CLASS_CAST_CACHE; i++) { | ||
4385 | - if (atomic_read(&class->class_cast_cache[i]) == typename) { | ||
4386 | + if (qatomic_read(&class->class_cast_cache[i]) == typename) { | ||
4387 | ret = class; | ||
4388 | goto out; | ||
4389 | } | ||
4390 | @@ -XXX,XX +XXX,XX @@ ObjectClass *object_class_dynamic_cast_assert(ObjectClass *class, | ||
4391 | #ifdef CONFIG_QOM_CAST_DEBUG | ||
4392 | if (class && ret == class) { | ||
4393 | for (i = 1; i < OBJECT_CLASS_CAST_CACHE; i++) { | ||
4394 | - atomic_set(&class->class_cast_cache[i - 1], | ||
4395 | - atomic_read(&class->class_cast_cache[i])); | ||
4396 | + qatomic_set(&class->class_cast_cache[i - 1], | ||
4397 | + qatomic_read(&class->class_cast_cache[i])); | ||
4398 | } | ||
4399 | - atomic_set(&class->class_cast_cache[i - 1], typename); | ||
4400 | + qatomic_set(&class->class_cast_cache[i - 1], typename); | ||
4401 | } | ||
4402 | out: | ||
4403 | #endif | ||
4404 | @@ -XXX,XX +XXX,XX @@ Object *object_ref(void *objptr) | ||
4405 | if (!obj) { | ||
4406 | return NULL; | ||
4407 | } | ||
4408 | - atomic_inc(&obj->ref); | ||
4409 | + qatomic_inc(&obj->ref); | ||
4410 | return obj; | ||
4411 | } | ||
4412 | |||
4413 | @@ -XXX,XX +XXX,XX @@ void object_unref(void *objptr) | ||
4414 | g_assert(obj->ref > 0); | ||
4415 | |||
4416 | /* parent always holds a reference to its children */ | ||
4417 | - if (atomic_fetch_dec(&obj->ref) == 1) { | ||
4418 | + if (qatomic_fetch_dec(&obj->ref) == 1) { | ||
4419 | object_finalize(obj); | ||
4420 | } | ||
4421 | } | ||
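The object_ref()/object_unref() hunk above is the usual lock-free reference-counting idiom: qatomic_inc() takes a reference, and whichever caller sees qatomic_fetch_dec() return 1 dropped the last one and runs the destructor. A rough standalone sketch of the same idea, written against C11 <stdatomic.h> rather than QEMU's qemu/atomic.h (the RefObj names below are hypothetical and not part of this patch):

    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct {
        atomic_uint ref;      /* plays the role of obj->ref */
        int payload;
    } RefObj;

    static RefObj *refobj_new(int payload)
    {
        RefObj *o = malloc(sizeof(*o));
        atomic_init(&o->ref, 1);
        o->payload = payload;
        return o;
    }

    static RefObj *refobj_ref(RefObj *o)
    {
        atomic_fetch_add(&o->ref, 1);    /* like qatomic_inc(&obj->ref) */
        return o;
    }

    static void refobj_unref(RefObj *o)
    {
        /* like qatomic_fetch_dec(&obj->ref) == 1: the caller that drops
         * the last reference is the one that runs the destructor. */
        if (atomic_fetch_sub(&o->ref, 1) == 1) {
            free(o);
        }
    }

    int main(void)
    {
        RefObj *o = refobj_new(42);
        refobj_ref(o);     /* refcount 2 */
        refobj_unref(o);   /* refcount 1 */
        refobj_unref(o);   /* last reference, frees */
        return 0;
    }
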
4422 | diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c | ||
4423 | index XXXXXXX..XXXXXXX 100644 | ||
4424 | --- a/scsi/qemu-pr-helper.c | ||
4425 | +++ b/scsi/qemu-pr-helper.c | ||
4426 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn prh_co_entry(void *opaque) | ||
4427 | goto out; | ||
4428 | } | ||
4429 | |||
4430 | - while (atomic_read(&state) == RUNNING) { | ||
4431 | + while (qatomic_read(&state) == RUNNING) { | ||
4432 | PRHelperRequest req; | ||
4433 | PRHelperResponse resp; | ||
4434 | int sz; | ||
4435 | @@ -XXX,XX +XXX,XX @@ static gboolean accept_client(QIOChannel *ioc, GIOCondition cond, gpointer opaqu | ||
4436 | |||
4437 | static void termsig_handler(int signum) | ||
4438 | { | ||
4439 | - atomic_cmpxchg(&state, RUNNING, TERMINATE); | ||
4440 | + qatomic_cmpxchg(&state, RUNNING, TERMINATE); | ||
4441 | qemu_notify_event(); | ||
4442 | } | ||
4443 | |||
4444 | diff --git a/softmmu/cpu-throttle.c b/softmmu/cpu-throttle.c | ||
4445 | index XXXXXXX..XXXXXXX 100644 | ||
4446 | --- a/softmmu/cpu-throttle.c | ||
4447 | +++ b/softmmu/cpu-throttle.c | ||
4448 | @@ -XXX,XX +XXX,XX @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) | ||
4449 | } | ||
4450 | sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | ||
4451 | } | ||
4452 | - atomic_set(&cpu->throttle_thread_scheduled, 0); | ||
4453 | + qatomic_set(&cpu->throttle_thread_scheduled, 0); | ||
4454 | } | ||
4455 | |||
4456 | static void cpu_throttle_timer_tick(void *opaque) | ||
4457 | @@ -XXX,XX +XXX,XX @@ static void cpu_throttle_timer_tick(void *opaque) | ||
4458 | return; | ||
4459 | } | ||
4460 | CPU_FOREACH(cpu) { | ||
4461 | - if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) { | ||
4462 | + if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) { | ||
4463 | async_run_on_cpu(cpu, cpu_throttle_thread, | ||
4464 | RUN_ON_CPU_NULL); | ||
4465 | } | ||
4466 | @@ -XXX,XX +XXX,XX @@ void cpu_throttle_set(int new_throttle_pct) | ||
4467 | new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX); | ||
4468 | new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN); | ||
4469 | |||
4470 | - atomic_set(&throttle_percentage, new_throttle_pct); | ||
4471 | + qatomic_set(&throttle_percentage, new_throttle_pct); | ||
4472 | |||
4473 | timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) + | ||
4474 | CPU_THROTTLE_TIMESLICE_NS); | ||
4475 | @@ -XXX,XX +XXX,XX @@ void cpu_throttle_set(int new_throttle_pct) | ||
4476 | |||
4477 | void cpu_throttle_stop(void) | ||
4478 | { | ||
4479 | - atomic_set(&throttle_percentage, 0); | ||
4480 | + qatomic_set(&throttle_percentage, 0); | ||
4481 | } | ||
4482 | |||
4483 | bool cpu_throttle_active(void) | ||
4484 | @@ -XXX,XX +XXX,XX @@ bool cpu_throttle_active(void) | ||
4485 | |||
4486 | int cpu_throttle_get_percentage(void) | ||
4487 | { | ||
4488 | - return atomic_read(&throttle_percentage); | ||
4489 | + return qatomic_read(&throttle_percentage); | ||
4490 | } | ||
4491 | |||
4492 | void cpu_throttle_init(void) | ||
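The cpu_throttle_timer_tick() hunk above uses qatomic_xchg() as a test-and-set guard so that cpu_throttle_thread() is queued at most once per CPU until the worker clears the flag again. A rough standalone sketch of that guard with C11 <stdatomic.h> (hypothetical names, not part of this patch):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int work_scheduled;

    static void schedule_work(void)
    {
        /* exchange returns the previous value: only the caller that flips
         * 0 -> 1 actually queues the work, later callers see 1 and skip. */
        if (!atomic_exchange(&work_scheduled, 1)) {
            printf("work queued\n");
        }
    }

    static void work_done(void)
    {
        /* mirrors qatomic_set(&cpu->throttle_thread_scheduled, 0) */
        atomic_store(&work_scheduled, 0);
    }

    int main(void)
    {
        schedule_work();   /* queues */
        schedule_work();   /* skipped */
        work_done();
        schedule_work();   /* queues again */
        return 0;
    }
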
4493 | diff --git a/softmmu/cpus.c b/softmmu/cpus.c | ||
4494 | index XXXXXXX..XXXXXXX 100644 | ||
4495 | --- a/softmmu/cpus.c | ||
4496 | +++ b/softmmu/cpus.c | ||
4497 | @@ -XXX,XX +XXX,XX @@ static void cpu_update_icount_locked(CPUState *cpu) | ||
4498 | int64_t executed = cpu_get_icount_executed(cpu); | ||
4499 | cpu->icount_budget -= executed; | ||
4500 | |||
4501 | - atomic_set_i64(&timers_state.qemu_icount, | ||
4502 | + qatomic_set_i64(&timers_state.qemu_icount, | ||
4503 | timers_state.qemu_icount + executed); | ||
4504 | } | ||
4505 | |||
4506 | @@ -XXX,XX +XXX,XX @@ static int64_t cpu_get_icount_raw_locked(void) | ||
4507 | cpu_update_icount_locked(cpu); | ||
4508 | } | ||
4509 | /* The read is protected by the seqlock, but needs atomic64 to avoid UB */ | ||
4510 | - return atomic_read_i64(&timers_state.qemu_icount); | ||
4511 | + return qatomic_read_i64(&timers_state.qemu_icount); | ||
4512 | } | ||
4513 | |||
4514 | static int64_t cpu_get_icount_locked(void) | ||
4515 | { | ||
4516 | int64_t icount = cpu_get_icount_raw_locked(); | ||
4517 | - return atomic_read_i64(&timers_state.qemu_icount_bias) + | ||
4518 | + return qatomic_read_i64(&timers_state.qemu_icount_bias) + | ||
4519 | cpu_icount_to_ns(icount); | ||
4520 | } | ||
4521 | |||
4522 | @@ -XXX,XX +XXX,XX @@ int64_t cpu_get_icount(void) | ||
4523 | |||
4524 | int64_t cpu_icount_to_ns(int64_t icount) | ||
4525 | { | ||
4526 | - return icount << atomic_read(&timers_state.icount_time_shift); | ||
4527 | + return icount << qatomic_read(&timers_state.icount_time_shift); | ||
4528 | } | ||
4529 | |||
4530 | static int64_t cpu_get_ticks_locked(void) | ||
4531 | @@ -XXX,XX +XXX,XX @@ static void icount_adjust(void) | ||
4532 | && last_delta + ICOUNT_WOBBLE < delta * 2 | ||
4533 | && timers_state.icount_time_shift > 0) { | ||
4534 | /* The guest is getting too far ahead. Slow time down. */ | ||
4535 | - atomic_set(&timers_state.icount_time_shift, | ||
4536 | + qatomic_set(&timers_state.icount_time_shift, | ||
4537 | timers_state.icount_time_shift - 1); | ||
4538 | } | ||
4539 | if (delta < 0 | ||
4540 | && last_delta - ICOUNT_WOBBLE > delta * 2 | ||
4541 | && timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) { | ||
4542 | /* The guest is getting too far behind. Speed time up. */ | ||
4543 | - atomic_set(&timers_state.icount_time_shift, | ||
4544 | + qatomic_set(&timers_state.icount_time_shift, | ||
4545 | timers_state.icount_time_shift + 1); | ||
4546 | } | ||
4547 | last_delta = delta; | ||
4548 | - atomic_set_i64(&timers_state.qemu_icount_bias, | ||
4549 | + qatomic_set_i64(&timers_state.qemu_icount_bias, | ||
4550 | cur_icount - (timers_state.qemu_icount | ||
4551 | << timers_state.icount_time_shift)); | ||
4552 | seqlock_write_unlock(&timers_state.vm_clock_seqlock, | ||
4553 | @@ -XXX,XX +XXX,XX @@ static void icount_adjust_vm(void *opaque) | ||
4554 | |||
4555 | static int64_t qemu_icount_round(int64_t count) | ||
4556 | { | ||
4557 | - int shift = atomic_read(&timers_state.icount_time_shift); | ||
4558 | + int shift = qatomic_read(&timers_state.icount_time_shift); | ||
4559 | return (count + (1 << shift) - 1) >> shift; | ||
4560 | } | ||
4561 | |||
4562 | @@ -XXX,XX +XXX,XX @@ static void icount_warp_rt(void) | ||
4563 | int64_t delta = clock - cur_icount; | ||
4564 | warp_delta = MIN(warp_delta, delta); | ||
4565 | } | ||
4566 | - atomic_set_i64(&timers_state.qemu_icount_bias, | ||
4567 | + qatomic_set_i64(&timers_state.qemu_icount_bias, | ||
4568 | timers_state.qemu_icount_bias + warp_delta); | ||
4569 | } | ||
4570 | timers_state.vm_clock_warp_start = -1; | ||
4571 | @@ -XXX,XX +XXX,XX @@ void qtest_clock_warp(int64_t dest) | ||
4572 | |||
4573 | seqlock_write_lock(&timers_state.vm_clock_seqlock, | ||
4574 | &timers_state.vm_clock_lock); | ||
4575 | - atomic_set_i64(&timers_state.qemu_icount_bias, | ||
4576 | + qatomic_set_i64(&timers_state.qemu_icount_bias, | ||
4577 | timers_state.qemu_icount_bias + warp); | ||
4578 | seqlock_write_unlock(&timers_state.vm_clock_seqlock, | ||
4579 | &timers_state.vm_clock_lock); | ||
4580 | @@ -XXX,XX +XXX,XX @@ void qemu_start_warp_timer(void) | ||
4581 | */ | ||
4582 | seqlock_write_lock(&timers_state.vm_clock_seqlock, | ||
4583 | &timers_state.vm_clock_lock); | ||
4584 | - atomic_set_i64(&timers_state.qemu_icount_bias, | ||
4585 | + qatomic_set_i64(&timers_state.qemu_icount_bias, | ||
4586 | timers_state.qemu_icount_bias + deadline); | ||
4587 | seqlock_write_unlock(&timers_state.vm_clock_seqlock, | ||
4588 | &timers_state.vm_clock_lock); | ||
4589 | @@ -XXX,XX +XXX,XX @@ static void qemu_cpu_kick_rr_next_cpu(void) | ||
4590 | { | ||
4591 | CPUState *cpu; | ||
4592 | do { | ||
4593 | - cpu = atomic_mb_read(&tcg_current_rr_cpu); | ||
4594 | + cpu = qatomic_mb_read(&tcg_current_rr_cpu); | ||
4595 | if (cpu) { | ||
4596 | cpu_exit(cpu); | ||
4597 | } | ||
4598 | - } while (cpu != atomic_mb_read(&tcg_current_rr_cpu)); | ||
4599 | + } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu)); | ||
4600 | } | ||
4601 | |||
4602 | /* Kick all RR vCPUs */ | ||
4603 | @@ -XXX,XX +XXX,XX @@ static void qemu_cpu_stop(CPUState *cpu, bool exit) | ||
4604 | |||
4605 | static void qemu_wait_io_event_common(CPUState *cpu) | ||
4606 | { | ||
4607 | - atomic_mb_set(&cpu->thread_kicked, false); | ||
4608 | + qatomic_mb_set(&cpu->thread_kicked, false); | ||
4609 | if (cpu->stop) { | ||
4610 | qemu_cpu_stop(cpu, false); | ||
4611 | } | ||
4612 | @@ -XXX,XX +XXX,XX @@ static int tcg_cpu_exec(CPUState *cpu) | ||
4613 | ret = cpu_exec(cpu); | ||
4614 | cpu_exec_end(cpu); | ||
4615 | #ifdef CONFIG_PROFILER | ||
4616 | - atomic_set(&tcg_ctx->prof.cpu_exec_time, | ||
4617 | + qatomic_set(&tcg_ctx->prof.cpu_exec_time, | ||
4618 | tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); | ||
4619 | #endif | ||
4620 | return ret; | ||
4621 | @@ -XXX,XX +XXX,XX @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg) | ||
4622 | |||
4623 | while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { | ||
4624 | |||
4625 | - atomic_mb_set(&tcg_current_rr_cpu, cpu); | ||
4626 | + qatomic_mb_set(&tcg_current_rr_cpu, cpu); | ||
4627 | current_cpu = cpu; | ||
4628 | |||
4629 | qemu_clock_enable(QEMU_CLOCK_VIRTUAL, | ||
4630 | @@ -XXX,XX +XXX,XX @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg) | ||
4631 | cpu = CPU_NEXT(cpu); | ||
4632 | } /* while (cpu && !cpu->exit_request).. */ | ||
4633 | |||
4634 | - /* Does not need atomic_mb_set because a spurious wakeup is okay. */ | ||
4635 | - atomic_set(&tcg_current_rr_cpu, NULL); | ||
4636 | + /* Does not need qatomic_mb_set because a spurious wakeup is okay. */ | ||
4637 | + qatomic_set(&tcg_current_rr_cpu, NULL); | ||
4638 | |||
4639 | if (cpu && cpu->exit_request) { | ||
4640 | - atomic_mb_set(&cpu->exit_request, 0); | ||
4641 | + qatomic_mb_set(&cpu->exit_request, 0); | ||
4642 | } | ||
4643 | |||
4644 | if (use_icount && all_cpu_threads_idle()) { | ||
4645 | @@ -XXX,XX +XXX,XX @@ static void *qemu_tcg_cpu_thread_fn(void *arg) | ||
4646 | } | ||
4647 | } | ||
4648 | |||
4649 | - atomic_mb_set(&cpu->exit_request, 0); | ||
4650 | + qatomic_mb_set(&cpu->exit_request, 0); | ||
4651 | qemu_wait_io_event(cpu); | ||
4652 | } while (!cpu->unplug || cpu_can_run(cpu)); | ||
4653 | |||
4654 | @@ -XXX,XX +XXX,XX @@ bool qemu_mutex_iothread_locked(void) | ||
4655 | */ | ||
4656 | void qemu_mutex_lock_iothread_impl(const char *file, int line) | ||
4657 | { | ||
4658 | - QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func); | ||
4659 | + QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func); | ||
4660 | |||
4661 | g_assert(!qemu_mutex_iothread_locked()); | ||
4662 | bql_lock(&qemu_global_mutex, file, line); | ||
4663 | diff --git a/softmmu/memory.c b/softmmu/memory.c | ||
4664 | index XXXXXXX..XXXXXXX 100644 | ||
4665 | --- a/softmmu/memory.c | ||
4666 | +++ b/softmmu/memory.c | ||
4667 | @@ -XXX,XX +XXX,XX @@ static void flatview_destroy(FlatView *view) | ||
4668 | |||
4669 | static bool flatview_ref(FlatView *view) | ||
4670 | { | ||
4671 | - return atomic_fetch_inc_nonzero(&view->ref) > 0; | ||
4672 | + return qatomic_fetch_inc_nonzero(&view->ref) > 0; | ||
4673 | } | ||
4674 | |||
4675 | void flatview_unref(FlatView *view) | ||
4676 | { | ||
4677 | - if (atomic_fetch_dec(&view->ref) == 1) { | ||
4678 | + if (qatomic_fetch_dec(&view->ref) == 1) { | ||
4679 | trace_flatview_destroy_rcu(view, view->root); | ||
4680 | assert(view->root); | ||
4681 | call_rcu(view, flatview_destroy, rcu); | ||
4682 | @@ -XXX,XX +XXX,XX @@ static void address_space_set_flatview(AddressSpace *as) | ||
4683 | } | ||
4684 | |||
4685 | /* Writes are protected by the BQL. */ | ||
4686 | - atomic_rcu_set(&as->current_map, new_view); | ||
4687 | + qatomic_rcu_set(&as->current_map, new_view); | ||
4688 | if (old_view) { | ||
4689 | flatview_unref(old_view); | ||
4690 | } | ||
4691 | diff --git a/softmmu/vl.c b/softmmu/vl.c | ||
4692 | index XXXXXXX..XXXXXXX 100644 | ||
4693 | --- a/softmmu/vl.c | ||
4694 | +++ b/softmmu/vl.c | ||
4695 | @@ -XXX,XX +XXX,XX @@ ShutdownCause qemu_reset_requested_get(void) | ||
4696 | |||
4697 | static int qemu_shutdown_requested(void) | ||
4698 | { | ||
4699 | - return atomic_xchg(&shutdown_requested, SHUTDOWN_CAUSE_NONE); | ||
4700 | + return qatomic_xchg(&shutdown_requested, SHUTDOWN_CAUSE_NONE); | ||
4701 | } | ||
4702 | |||
4703 | static void qemu_kill_report(void) | ||
4704 | diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c | ||
4705 | index XXXXXXX..XXXXXXX 100644 | ||
4706 | --- a/target/arm/mte_helper.c | ||
4707 | +++ b/target/arm/mte_helper.c | ||
4708 | @@ -XXX,XX +XXX,XX @@ static void store_tag1(uint64_t ptr, uint8_t *mem, int tag) | ||
4709 | static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag) | ||
4710 | { | ||
4711 | int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4; | ||
4712 | - uint8_t old = atomic_read(mem); | ||
4713 | + uint8_t old = qatomic_read(mem); | ||
4714 | |||
4715 | while (1) { | ||
4716 | uint8_t new = deposit32(old, ofs, 4, tag); | ||
4717 | - uint8_t cmp = atomic_cmpxchg(mem, old, new); | ||
4718 | + uint8_t cmp = qatomic_cmpxchg(mem, old, new); | ||
4719 | if (likely(cmp == old)) { | ||
4720 | return; | ||
4721 | } | ||
4722 | @@ -XXX,XX +XXX,XX @@ static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt, | ||
4723 | 2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra); | ||
4724 | if (mem1) { | ||
4725 | tag |= tag << 4; | ||
4726 | - atomic_set(mem1, tag); | ||
4727 | + qatomic_set(mem1, tag); | ||
4728 | } | ||
4729 | } | ||
4730 | } | ||
4731 | diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c | ||
4732 | index XXXXXXX..XXXXXXX 100644 | ||
4733 | --- a/target/hppa/op_helper.c | ||
4734 | +++ b/target/hppa/op_helper.c | ||
4735 | @@ -XXX,XX +XXX,XX @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val, | ||
4736 | old = *haddr; | ||
4737 | while (1) { | ||
4738 | new = (old & ~mask) | (val & mask); | ||
4739 | - cmp = atomic_cmpxchg(haddr, old, new); | ||
4740 | + cmp = qatomic_cmpxchg(haddr, old, new); | ||
4741 | if (cmp == old) { | ||
4742 | return; | ||
4743 | } | ||
4744 | diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c | ||
4745 | index XXXXXXX..XXXXXXX 100644 | ||
4746 | --- a/target/i386/mem_helper.c | ||
4747 | +++ b/target/i386/mem_helper.c | ||
4748 | @@ -XXX,XX +XXX,XX @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) | ||
4749 | uint64_t *haddr = g2h(a0); | ||
4750 | cmpv = cpu_to_le64(cmpv); | ||
4751 | newv = cpu_to_le64(newv); | ||
4752 | - oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv); | ||
4753 | + oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); | ||
4754 | oldv = le64_to_cpu(oldv); | ||
4755 | } | ||
4756 | #else | ||
4757 | diff --git a/target/i386/whpx-all.c b/target/i386/whpx-all.c | ||
4758 | index XXXXXXX..XXXXXXX 100644 | ||
4759 | --- a/target/i386/whpx-all.c | ||
4760 | +++ b/target/i386/whpx-all.c | ||
4761 | @@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu) | ||
4762 | whpx_vcpu_process_async_events(cpu); | ||
4763 | if (cpu->halted) { | ||
4764 | cpu->exception_index = EXCP_HLT; | ||
4765 | - atomic_set(&cpu->exit_request, false); | ||
4766 | + qatomic_set(&cpu->exit_request, false); | ||
4767 | return 0; | ||
4768 | } | ||
4769 | |||
4770 | @@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu) | ||
4771 | |||
4772 | whpx_vcpu_pre_run(cpu); | ||
4773 | |||
4774 | - if (atomic_read(&cpu->exit_request)) { | ||
4775 | + if (qatomic_read(&cpu->exit_request)) { | ||
4776 | whpx_vcpu_kick(cpu); | ||
4777 | } | ||
4778 | |||
4779 | @@ -XXX,XX +XXX,XX @@ static int whpx_vcpu_run(CPUState *cpu) | ||
4780 | qemu_mutex_lock_iothread(); | ||
4781 | current_cpu = cpu; | ||
4782 | |||
4783 | - atomic_set(&cpu->exit_request, false); | ||
4784 | + qatomic_set(&cpu->exit_request, false); | ||
4785 | |||
4786 | return ret < 0; | ||
4787 | } | ||
4788 | diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c | ||
4789 | index XXXXXXX..XXXXXXX 100644 | ||
4790 | --- a/target/riscv/cpu_helper.c | ||
4791 | +++ b/target/riscv/cpu_helper.c | ||
4792 | @@ -XXX,XX +XXX,XX @@ restart: | ||
4793 | *pte_pa = pte = updated_pte; | ||
4794 | #else | ||
4795 | target_ulong old_pte = | ||
4796 | - atomic_cmpxchg(pte_pa, pte, updated_pte); | ||
4797 | + qatomic_cmpxchg(pte_pa, pte, updated_pte); | ||
4798 | if (old_pte != pte) { | ||
4799 | goto restart; | ||
4800 | } else { | ||
4801 | diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c | ||
4802 | index XXXXXXX..XXXXXXX 100644 | ||
4803 | --- a/target/s390x/mem_helper.c | ||
4804 | +++ b/target/s390x/mem_helper.c | ||
4805 | @@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, | ||
4806 | if (parallel) { | ||
4807 | #ifdef CONFIG_USER_ONLY | ||
4808 | uint32_t *haddr = g2h(a1); | ||
4809 | - ov = atomic_cmpxchg__nocheck(haddr, cv, nv); | ||
4810 | + ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); | ||
4811 | #else | ||
4812 | TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); | ||
4813 | ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); | ||
4814 | @@ -XXX,XX +XXX,XX @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, | ||
4815 | #ifdef CONFIG_ATOMIC64 | ||
4816 | # ifdef CONFIG_USER_ONLY | ||
4817 | uint64_t *haddr = g2h(a1); | ||
4818 | - ov = atomic_cmpxchg__nocheck(haddr, cv, nv); | ||
4819 | + ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); | ||
4820 | # else | ||
4821 | TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); | ||
4822 | ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); | ||
4823 | diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c | ||
4824 | index XXXXXXX..XXXXXXX 100644 | ||
4825 | --- a/target/xtensa/exc_helper.c | ||
4826 | +++ b/target/xtensa/exc_helper.c | ||
4827 | @@ -XXX,XX +XXX,XX @@ void HELPER(check_interrupts)(CPUXtensaState *env) | ||
4828 | |||
4829 | void HELPER(intset)(CPUXtensaState *env, uint32_t v) | ||
4830 | { | ||
4831 | - atomic_or(&env->sregs[INTSET], | ||
4832 | + qatomic_or(&env->sregs[INTSET], | ||
4833 | v & env->config->inttype_mask[INTTYPE_SOFTWARE]); | ||
4834 | } | ||
4835 | |||
4836 | static void intclear(CPUXtensaState *env, uint32_t v) | ||
4837 | { | ||
4838 | - atomic_and(&env->sregs[INTSET], ~v); | ||
4839 | + qatomic_and(&env->sregs[INTSET], ~v); | ||
4840 | } | ||
4841 | |||
4842 | void HELPER(intclear)(CPUXtensaState *env, uint32_t v) | ||
4843 | diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c | ||
4844 | index XXXXXXX..XXXXXXX 100644 | ||
4845 | --- a/target/xtensa/op_helper.c | ||
4846 | +++ b/target/xtensa/op_helper.c | ||
4847 | @@ -XXX,XX +XXX,XX @@ void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i) | ||
4848 | { | ||
4849 | uint64_t dcc; | ||
4850 | |||
4851 | - atomic_and(&env->sregs[INTSET], | ||
4852 | + qatomic_and(&env->sregs[INTSET], | ||
4853 | ~(1u << env->config->timerint[i])); | ||
4854 | HELPER(update_ccount)(env); | ||
4855 | dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1; | ||
4856 | diff --git a/tcg/tcg.c b/tcg/tcg.c | ||
4857 | index XXXXXXX..XXXXXXX 100644 | ||
4858 | --- a/tcg/tcg.c | ||
4859 | +++ b/tcg/tcg.c | ||
4860 | @@ -XXX,XX +XXX,XX @@ static inline bool tcg_region_initial_alloc__locked(TCGContext *s) | ||
4861 | /* Call from a safe-work context */ | ||
4862 | void tcg_region_reset_all(void) | ||
4863 | { | ||
4864 | - unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); | ||
4865 | + unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs); | ||
4866 | unsigned int i; | ||
4867 | |||
4868 | qemu_mutex_lock(®ion.lock); | ||
4869 | @@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void) | ||
4870 | region.agg_size_full = 0; | ||
4871 | |||
4872 | for (i = 0; i < n_ctxs; i++) { | ||
4873 | - TCGContext *s = atomic_read(&tcg_ctxs[i]); | ||
4874 | + TCGContext *s = qatomic_read(&tcg_ctxs[i]); | ||
4875 | bool err = tcg_region_initial_alloc__locked(s); | ||
4876 | |||
4877 | g_assert(!err); | ||
4878 | @@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void) | ||
4879 | } | ||
4880 | |||
4881 | /* Claim an entry in tcg_ctxs */ | ||
4882 | - n = atomic_fetch_inc(&n_tcg_ctxs); | ||
4883 | + n = qatomic_fetch_inc(&n_tcg_ctxs); | ||
4884 | g_assert(n < ms->smp.max_cpus); | ||
4885 | - atomic_set(&tcg_ctxs[n], s); | ||
4886 | + qatomic_set(&tcg_ctxs[n], s); | ||
4887 | |||
4888 | if (n > 0) { | ||
4889 | alloc_tcg_plugin_context(s); | ||
4890 | @@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void) | ||
4891 | */ | ||
4892 | size_t tcg_code_size(void) | ||
4893 | { | ||
4894 | - unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); | ||
4895 | + unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs); | ||
4896 | unsigned int i; | ||
4897 | size_t total; | ||
4898 | |||
4899 | qemu_mutex_lock(®ion.lock); | ||
4900 | total = region.agg_size_full; | ||
4901 | for (i = 0; i < n_ctxs; i++) { | ||
4902 | - const TCGContext *s = atomic_read(&tcg_ctxs[i]); | ||
4903 | + const TCGContext *s = qatomic_read(&tcg_ctxs[i]); | ||
4904 | size_t size; | ||
4905 | |||
4906 | - size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer; | ||
4907 | + size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer; | ||
4908 | g_assert(size <= s->code_gen_buffer_size); | ||
4909 | total += size; | ||
4910 | } | ||
4911 | @@ -XXX,XX +XXX,XX @@ size_t tcg_code_capacity(void) | ||
4912 | |||
4913 | size_t tcg_tb_phys_invalidate_count(void) | ||
4914 | { | ||
4915 | - unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); | ||
4916 | + unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs); | ||
4917 | unsigned int i; | ||
4918 | size_t total = 0; | ||
4919 | |||
4920 | for (i = 0; i < n_ctxs; i++) { | ||
4921 | - const TCGContext *s = atomic_read(&tcg_ctxs[i]); | ||
4922 | + const TCGContext *s = qatomic_read(&tcg_ctxs[i]); | ||
4923 | |||
4924 | - total += atomic_read(&s->tb_phys_invalidate_count); | ||
4925 | + total += qatomic_read(&s->tb_phys_invalidate_count); | ||
4926 | } | ||
4927 | return total; | ||
4928 | } | ||
4929 | @@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_alloc(TCGContext *s) | ||
4930 | } | ||
4931 | goto retry; | ||
4932 | } | ||
4933 | - atomic_set(&s->code_gen_ptr, next); | ||
4934 | + qatomic_set(&s->code_gen_ptr, next); | ||
4935 | s->data_gen_ptr = NULL; | ||
4936 | return tb; | ||
4937 | } | ||
4938 | @@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) | ||
4939 | QemuLogFile *logfile; | ||
4940 | |||
4941 | rcu_read_lock(); | ||
4942 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
4943 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
4944 | if (logfile) { | ||
4945 | for (; col < 40; ++col) { | ||
4946 | putc(' ', logfile->fd); | ||
4947 | @@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op) | ||
4948 | s->nb_ops--; | ||
4949 | |||
4950 | #ifdef CONFIG_PROFILER | ||
4951 | - atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1); | ||
4952 | + qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1); | ||
4953 | #endif | ||
4954 | } | ||
4955 | |||
4956 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) | ||
4957 | /* avoid copy/paste errors */ | ||
4958 | #define PROF_ADD(to, from, field) \ | ||
4959 | do { \ | ||
4960 | - (to)->field += atomic_read(&((from)->field)); \ | ||
4961 | + (to)->field += qatomic_read(&((from)->field)); \ | ||
4962 | } while (0) | ||
4963 | |||
4964 | #define PROF_MAX(to, from, field) \ | ||
4965 | do { \ | ||
4966 | - typeof((from)->field) val__ = atomic_read(&((from)->field)); \ | ||
4967 | + typeof((from)->field) val__ = qatomic_read(&((from)->field)); \ | ||
4968 | if (val__ > (to)->field) { \ | ||
4969 | (to)->field = val__; \ | ||
4970 | } \ | ||
4971 | @@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) | ||
4972 | static inline | ||
4973 | void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table) | ||
4974 | { | ||
4975 | - unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); | ||
4976 | + unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs); | ||
4977 | unsigned int i; | ||
4978 | |||
4979 | for (i = 0; i < n_ctxs; i++) { | ||
4980 | - TCGContext *s = atomic_read(&tcg_ctxs[i]); | ||
4981 | + TCGContext *s = qatomic_read(&tcg_ctxs[i]); | ||
4982 | const TCGProfile *orig = &s->prof; | ||
4983 | |||
4984 | if (counters) { | ||
4985 | @@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void) | ||
4986 | |||
4987 | int64_t tcg_cpu_exec_time(void) | ||
4988 | { | ||
4989 | - unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); | ||
4990 | + unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs); | ||
4991 | unsigned int i; | ||
4992 | int64_t ret = 0; | ||
4993 | |||
4994 | for (i = 0; i < n_ctxs; i++) { | ||
4995 | - const TCGContext *s = atomic_read(&tcg_ctxs[i]); | ||
4996 | + const TCGContext *s = qatomic_read(&tcg_ctxs[i]); | ||
4997 | const TCGProfile *prof = &s->prof; | ||
4998 | |||
4999 | - ret += atomic_read(&prof->cpu_exec_time); | ||
5000 | + ret += qatomic_read(&prof->cpu_exec_time); | ||
5001 | } | ||
5002 | return ret; | ||
5003 | } | ||
5004 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
5005 | QTAILQ_FOREACH(op, &s->ops, link) { | ||
5006 | n++; | ||
5007 | } | ||
5008 | - atomic_set(&prof->op_count, prof->op_count + n); | ||
5009 | + qatomic_set(&prof->op_count, prof->op_count + n); | ||
5010 | if (n > prof->op_count_max) { | ||
5011 | - atomic_set(&prof->op_count_max, n); | ||
5012 | + qatomic_set(&prof->op_count_max, n); | ||
5013 | } | ||
5014 | |||
5015 | n = s->nb_temps; | ||
5016 | - atomic_set(&prof->temp_count, prof->temp_count + n); | ||
5017 | + qatomic_set(&prof->temp_count, prof->temp_count + n); | ||
5018 | if (n > prof->temp_count_max) { | ||
5019 | - atomic_set(&prof->temp_count_max, n); | ||
5020 | + qatomic_set(&prof->temp_count_max, n); | ||
5021 | } | ||
5022 | } | ||
5023 | #endif | ||
5024 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
5025 | #endif | ||
5026 | |||
5027 | #ifdef CONFIG_PROFILER | ||
5028 | - atomic_set(&prof->opt_time, prof->opt_time - profile_getclock()); | ||
5029 | + qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock()); | ||
5030 | #endif | ||
5031 | |||
5032 | #ifdef USE_TCG_OPTIMIZATIONS | ||
5033 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
5034 | #endif | ||
5035 | |||
5036 | #ifdef CONFIG_PROFILER | ||
5037 | - atomic_set(&prof->opt_time, prof->opt_time + profile_getclock()); | ||
5038 | - atomic_set(&prof->la_time, prof->la_time - profile_getclock()); | ||
5039 | + qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock()); | ||
5040 | + qatomic_set(&prof->la_time, prof->la_time - profile_getclock()); | ||
5041 | #endif | ||
5042 | |||
5043 | reachable_code_pass(s); | ||
5044 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
5045 | } | ||
5046 | |||
5047 | #ifdef CONFIG_PROFILER | ||
5048 | - atomic_set(&prof->la_time, prof->la_time + profile_getclock()); | ||
5049 | + qatomic_set(&prof->la_time, prof->la_time + profile_getclock()); | ||
5050 | #endif | ||
5051 | |||
5052 | #ifdef DEBUG_DISAS | ||
5053 | @@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) | ||
5054 | TCGOpcode opc = op->opc; | ||
5055 | |||
5056 | #ifdef CONFIG_PROFILER | ||
5057 | - atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1); | ||
5058 | + qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1); | ||
5059 | #endif | ||
5060 | |||
5061 | switch (opc) { | ||
5062 | diff --git a/tcg/tci.c b/tcg/tci.c | ||
5063 | index XXXXXXX..XXXXXXX 100644 | ||
5064 | --- a/tcg/tci.c | ||
5065 | +++ b/tcg/tci.c | ||
5066 | @@ -XXX,XX +XXX,XX @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) | ||
5067 | case INDEX_op_goto_tb: | ||
5068 | /* Jump address is aligned */ | ||
5069 | tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4); | ||
5070 | - t0 = atomic_read((int32_t *)tb_ptr); | ||
5071 | + t0 = qatomic_read((int32_t *)tb_ptr); | ||
5072 | tb_ptr += sizeof(int32_t); | ||
5073 | tci_assert(tb_ptr == old_code_ptr + op_size); | ||
5074 | tb_ptr += (int32_t)t0; | ||
5075 | diff --git a/tests/atomic64-bench.c b/tests/atomic64-bench.c | ||
5076 | index XXXXXXX..XXXXXXX 100644 | ||
5077 | --- a/tests/atomic64-bench.c | ||
5078 | +++ b/tests/atomic64-bench.c | ||
5079 | @@ -XXX,XX +XXX,XX @@ static void *thread_func(void *arg) | ||
5080 | { | ||
5081 | struct thread_info *info = arg; | ||
5082 | |||
5083 | - atomic_inc(&n_ready_threads); | ||
5084 | - while (!atomic_read(&test_start)) { | ||
5085 | + qatomic_inc(&n_ready_threads); | ||
5086 | + while (!qatomic_read(&test_start)) { | ||
5087 | cpu_relax(); | ||
5088 | } | ||
5089 | |||
5090 | - while (!atomic_read(&test_stop)) { | ||
5091 | + while (!qatomic_read(&test_stop)) { | ||
5092 | unsigned int index; | ||
5093 | |||
5094 | info->r = xorshift64star(info->r); | ||
5095 | index = info->r & (range - 1); | ||
5096 | - atomic_read_i64(&counts[index].i64); | ||
5097 | + qatomic_read_i64(&counts[index].i64); | ||
5098 | info->accesses++; | ||
5099 | } | ||
5100 | return NULL; | ||
5101 | @@ -XXX,XX +XXX,XX @@ static void run_test(void) | ||
5102 | { | ||
5103 | unsigned int i; | ||
5104 | |||
5105 | - while (atomic_read(&n_ready_threads) != n_threads) { | ||
5106 | + while (qatomic_read(&n_ready_threads) != n_threads) { | ||
5107 | cpu_relax(); | ||
5108 | } | ||
5109 | |||
5110 | - atomic_set(&test_start, true); | ||
5111 | + qatomic_set(&test_start, true); | ||
5112 | g_usleep(duration * G_USEC_PER_SEC); | ||
5113 | - atomic_set(&test_stop, true); | ||
5114 | + qatomic_set(&test_stop, true); | ||
5115 | |||
5116 | for (i = 0; i < n_threads; i++) { | ||
5117 | qemu_thread_join(&threads[i]); | ||
5118 | diff --git a/tests/atomic_add-bench.c b/tests/atomic_add-bench.c | ||
5119 | index XXXXXXX..XXXXXXX 100644 | ||
5120 | --- a/tests/atomic_add-bench.c | ||
5121 | +++ b/tests/atomic_add-bench.c | ||
5122 | @@ -XXX,XX +XXX,XX @@ static void *thread_func(void *arg) | ||
5123 | { | ||
5124 | struct thread_info *info = arg; | ||
5125 | |||
5126 | - atomic_inc(&n_ready_threads); | ||
5127 | - while (!atomic_read(&test_start)) { | ||
5128 | + qatomic_inc(&n_ready_threads); | ||
5129 | + while (!qatomic_read(&test_start)) { | ||
5130 | cpu_relax(); | ||
5131 | } | ||
5132 | |||
5133 | - while (!atomic_read(&test_stop)) { | ||
5134 | + while (!qatomic_read(&test_stop)) { | ||
5135 | unsigned int index; | ||
5136 | |||
5137 | info->r = xorshift64star(info->r); | ||
5138 | @@ -XXX,XX +XXX,XX @@ static void *thread_func(void *arg) | ||
5139 | counts[index].val += 1; | ||
5140 | qemu_mutex_unlock(&counts[index].lock); | ||
5141 | } else { | ||
5142 | - atomic_inc(&counts[index].val); | ||
5143 | + qatomic_inc(&counts[index].val); | ||
5144 | } | ||
5145 | } | ||
5146 | return NULL; | ||
5147 | @@ -XXX,XX +XXX,XX @@ static void run_test(void) | ||
5148 | { | ||
5149 | unsigned int i; | ||
5150 | |||
5151 | - while (atomic_read(&n_ready_threads) != n_threads) { | ||
5152 | + while (qatomic_read(&n_ready_threads) != n_threads) { | ||
5153 | cpu_relax(); | ||
5154 | } | ||
5155 | |||
5156 | - atomic_set(&test_start, true); | ||
5157 | + qatomic_set(&test_start, true); | ||
5158 | g_usleep(duration * G_USEC_PER_SEC); | ||
5159 | - atomic_set(&test_stop, true); | ||
5160 | + qatomic_set(&test_stop, true); | ||
5161 | |||
5162 | for (i = 0; i < n_threads; i++) { | ||
5163 | qemu_thread_join(&threads[i]); | ||
5164 | diff --git a/tests/iothread.c b/tests/iothread.c | ||
5165 | index XXXXXXX..XXXXXXX 100644 | ||
5166 | --- a/tests/iothread.c | ||
5167 | +++ b/tests/iothread.c | ||
5168 | @@ -XXX,XX +XXX,XX @@ static void *iothread_run(void *opaque) | ||
5169 | qemu_cond_signal(&iothread->init_done_cond); | ||
5170 | qemu_mutex_unlock(&iothread->init_done_lock); | ||
5171 | |||
5172 | - while (!atomic_read(&iothread->stopping)) { | ||
5173 | + while (!qatomic_read(&iothread->stopping)) { | ||
5174 | aio_poll(iothread->ctx, true); | ||
5175 | } | ||
5176 | |||
5177 | diff --git a/tests/qht-bench.c b/tests/qht-bench.c | ||
5178 | index XXXXXXX..XXXXXXX 100644 | ||
5179 | --- a/tests/qht-bench.c | ||
5180 | +++ b/tests/qht-bench.c | ||
5181 | @@ -XXX,XX +XXX,XX @@ static void *thread_func(void *p) | ||
5182 | |||
5183 | rcu_register_thread(); | ||
5184 | |||
5185 | - atomic_inc(&n_ready_threads); | ||
5186 | - while (!atomic_read(&test_start)) { | ||
5187 | + qatomic_inc(&n_ready_threads); | ||
5188 | + while (!qatomic_read(&test_start)) { | ||
5189 | cpu_relax(); | ||
5190 | } | ||
5191 | |||
5192 | rcu_read_lock(); | ||
5193 | - while (!atomic_read(&test_stop)) { | ||
5194 | + while (!qatomic_read(&test_stop)) { | ||
5195 | info->seed = xorshift64star(info->seed); | ||
5196 | info->func(info); | ||
5197 | } | ||
5198 | @@ -XXX,XX +XXX,XX @@ static void run_test(void) | ||
5199 | { | ||
5200 | int i; | ||
5201 | |||
5202 | - while (atomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) { | ||
5203 | + while (qatomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) { | ||
5204 | cpu_relax(); | ||
5205 | } | ||
5206 | |||
5207 | - atomic_set(&test_start, true); | ||
5208 | + qatomic_set(&test_start, true); | ||
5209 | g_usleep(duration * G_USEC_PER_SEC); | ||
5210 | - atomic_set(&test_stop, true); | ||
5211 | + qatomic_set(&test_stop, true); | ||
5212 | |||
5213 | for (i = 0; i < n_rw_threads; i++) { | ||
5214 | qemu_thread_join(&rw_threads[i]); | ||
5215 | diff --git a/tests/rcutorture.c b/tests/rcutorture.c | ||
5216 | index XXXXXXX..XXXXXXX 100644 | ||
5217 | --- a/tests/rcutorture.c | ||
5218 | +++ b/tests/rcutorture.c | ||
5219 | @@ -XXX,XX +XXX,XX @@ static void *rcu_read_perf_test(void *arg) | ||
5220 | rcu_register_thread(); | ||
5221 | |||
5222 | *(struct rcu_reader_data **)arg = &rcu_reader; | ||
5223 | - atomic_inc(&nthreadsrunning); | ||
5224 | + qatomic_inc(&nthreadsrunning); | ||
5225 | while (goflag == GOFLAG_INIT) { | ||
5226 | g_usleep(1000); | ||
5227 | } | ||
5228 | @@ -XXX,XX +XXX,XX @@ static void *rcu_update_perf_test(void *arg) | ||
5229 | rcu_register_thread(); | ||
5230 | |||
5231 | *(struct rcu_reader_data **)arg = &rcu_reader; | ||
5232 | - atomic_inc(&nthreadsrunning); | ||
5233 | + qatomic_inc(&nthreadsrunning); | ||
5234 | while (goflag == GOFLAG_INIT) { | ||
5235 | g_usleep(1000); | ||
5236 | } | ||
5237 | @@ -XXX,XX +XXX,XX @@ static void perftestinit(void) | ||
5238 | |||
5239 | static void perftestrun(int nthreads, int duration, int nreaders, int nupdaters) | ||
5240 | { | ||
5241 | - while (atomic_read(&nthreadsrunning) < nthreads) { | ||
5242 | + while (qatomic_read(&nthreadsrunning) < nthreads) { | ||
5243 | g_usleep(1000); | ||
5244 | } | ||
5245 | goflag = GOFLAG_RUN; | ||
5246 | @@ -XXX,XX +XXX,XX @@ static void *rcu_read_stress_test(void *arg) | ||
5247 | } | ||
5248 | while (goflag == GOFLAG_RUN) { | ||
5249 | rcu_read_lock(); | ||
5250 | - p = atomic_rcu_read(&rcu_stress_current); | ||
5251 | - if (atomic_read(&p->mbtest) == 0) { | ||
5252 | + p = qatomic_rcu_read(&rcu_stress_current); | ||
5253 | + if (qatomic_read(&p->mbtest) == 0) { | ||
5254 | n_mberror++; | ||
5255 | } | ||
5256 | rcu_read_lock(); | ||
5257 | @@ -XXX,XX +XXX,XX @@ static void *rcu_read_stress_test(void *arg) | ||
5258 | garbage++; | ||
5259 | } | ||
5260 | rcu_read_unlock(); | ||
5261 | - pc = atomic_read(&p->age); | ||
5262 | + pc = qatomic_read(&p->age); | ||
5263 | rcu_read_unlock(); | ||
5264 | if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) { | ||
5265 | pc = RCU_STRESS_PIPE_LEN; | ||
5266 | @@ -XXX,XX +XXX,XX @@ static void *rcu_read_stress_test(void *arg) | ||
5267 | static void *rcu_update_stress_test(void *arg) | ||
5268 | { | ||
5269 | int i, rcu_stress_idx = 0; | ||
5270 | - struct rcu_stress *cp = atomic_read(&rcu_stress_current); | ||
5271 | + struct rcu_stress *cp = qatomic_read(&rcu_stress_current); | ||
5272 | |||
5273 | rcu_register_thread(); | ||
5274 | *(struct rcu_reader_data **)arg = &rcu_reader; | ||
5275 | @@ -XXX,XX +XXX,XX @@ static void *rcu_update_stress_test(void *arg) | ||
5276 | p = &rcu_stress_array[rcu_stress_idx]; | ||
5277 | /* catching up with ourselves would be a bug */ | ||
5278 | assert(p != cp); | ||
5279 | - atomic_set(&p->mbtest, 0); | ||
5280 | + qatomic_set(&p->mbtest, 0); | ||
5281 | smp_mb(); | ||
5282 | - atomic_set(&p->age, 0); | ||
5283 | - atomic_set(&p->mbtest, 1); | ||
5284 | - atomic_rcu_set(&rcu_stress_current, p); | ||
5285 | + qatomic_set(&p->age, 0); | ||
5286 | + qatomic_set(&p->mbtest, 1); | ||
5287 | + qatomic_rcu_set(&rcu_stress_current, p); | ||
5288 | cp = p; | ||
5289 | /* | ||
5290 | * New RCU structure is now live, update pipe counts on old | ||
5291 | @@ -XXX,XX +XXX,XX @@ static void *rcu_update_stress_test(void *arg) | ||
5292 | */ | ||
5293 | for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) { | ||
5294 | if (i != rcu_stress_idx) { | ||
5295 | - atomic_set(&rcu_stress_array[i].age, | ||
5296 | + qatomic_set(&rcu_stress_array[i].age, | ||
5297 | rcu_stress_array[i].age + 1); | ||
5298 | } | ||
5299 | } | ||
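rcu_update_stress_test() above initializes the new rcu_stress structure and only then publishes it with qatomic_rcu_set(), so readers using qatomic_rcu_read() see either the old pointer or a fully initialized new one. A rough standalone sketch of that publication step with C11 <stdatomic.h> (hypothetical names, not part of this patch); it deliberately leaves out the read-side critical sections and grace-period machinery that make reuse or freeing of old structures safe:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct blob {
        int a, b;
    };

    static struct blob *_Atomic current;

    static void publish(int a, int b)
    {
        struct blob *p = malloc(sizeof(*p));
        p->a = a;
        p->b = b;
        /* release store: the fields of *p become visible before the
         * pointer does, the job qatomic_rcu_set() performs above */
        atomic_store_explicit(&current, p, memory_order_release);
    }

    static struct blob *get_current(void)
    {
        /* acquire stands in for the data-dependency ordering that
         * qatomic_rcu_read() relies on */
        return atomic_load_explicit(&current, memory_order_acquire);
    }

    int main(void)
    {
        publish(1, 2);
        struct blob *p = get_current();
        printf("%d %d\n", p->a, p->b);
        free(p);
        return 0;
    }
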
5300 | diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c | ||
5301 | index XXXXXXX..XXXXXXX 100644 | ||
5302 | --- a/tests/test-aio-multithread.c | ||
5303 | +++ b/tests/test-aio-multithread.c | ||
5304 | @@ -XXX,XX +XXX,XX @@ static bool schedule_next(int n) | ||
5305 | { | ||
5306 | Coroutine *co; | ||
5307 | |||
5308 | - co = atomic_xchg(&to_schedule[n], NULL); | ||
5309 | + co = qatomic_xchg(&to_schedule[n], NULL); | ||
5310 | if (!co) { | ||
5311 | - atomic_inc(&count_retry); | ||
5312 | + qatomic_inc(&count_retry); | ||
5313 | return false; | ||
5314 | } | ||
5315 | |||
5316 | if (n == id) { | ||
5317 | - atomic_inc(&count_here); | ||
5318 | + qatomic_inc(&count_here); | ||
5319 | } else { | ||
5320 | - atomic_inc(&count_other); | ||
5321 | + qatomic_inc(&count_other); | ||
5322 | } | ||
5323 | |||
5324 | aio_co_schedule(ctx[n], co); | ||
5325 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque) | ||
5326 | { | ||
5327 | g_assert(to_schedule[id] == NULL); | ||
5328 | |||
5329 | - while (!atomic_mb_read(&now_stopping)) { | ||
5330 | + while (!qatomic_mb_read(&now_stopping)) { | ||
5331 | int n; | ||
5332 | |||
5333 | n = g_test_rand_int_range(0, NUM_CONTEXTS); | ||
5334 | schedule_next(n); | ||
5335 | |||
5336 | - atomic_mb_set(&to_schedule[id], qemu_coroutine_self()); | ||
5337 | + qatomic_mb_set(&to_schedule[id], qemu_coroutine_self()); | ||
5338 | qemu_coroutine_yield(); | ||
5339 | g_assert(to_schedule[id] == NULL); | ||
5340 | } | ||
5341 | @@ -XXX,XX +XXX,XX @@ static void test_multi_co_schedule(int seconds) | ||
5342 | |||
5343 | g_usleep(seconds * 1000000); | ||
5344 | |||
5345 | - atomic_mb_set(&now_stopping, true); | ||
5346 | + qatomic_mb_set(&now_stopping, true); | ||
5347 | for (i = 0; i < NUM_CONTEXTS; i++) { | ||
5348 | ctx_run(i, finish_cb, NULL); | ||
5349 | to_schedule[i] = NULL; | ||
5350 | @@ -XXX,XX +XXX,XX @@ static CoMutex comutex; | ||
5351 | |||
5352 | static void coroutine_fn test_multi_co_mutex_entry(void *opaque) | ||
5353 | { | ||
5354 | - while (!atomic_mb_read(&now_stopping)) { | ||
5355 | + while (!qatomic_mb_read(&now_stopping)) { | ||
5356 | qemu_co_mutex_lock(&comutex); | ||
5357 | counter++; | ||
5358 | qemu_co_mutex_unlock(&comutex); | ||
5359 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_multi_co_mutex_entry(void *opaque) | ||
5360 | * exits before the coroutine is woken up, causing a spurious | ||
5361 | * assertion failure. | ||
5362 | */ | ||
5363 | - atomic_inc(&atomic_counter); | ||
5364 | + qatomic_inc(&atomic_counter); | ||
5365 | } | ||
5366 | - atomic_dec(&running); | ||
5367 | + qatomic_dec(&running); | ||
5368 | } | ||
5369 | |||
5370 | static void test_multi_co_mutex(int threads, int seconds) | ||
5371 | @@ -XXX,XX +XXX,XX @@ static void test_multi_co_mutex(int threads, int seconds) | ||
5372 | |||
5373 | g_usleep(seconds * 1000000); | ||
5374 | |||
5375 | - atomic_mb_set(&now_stopping, true); | ||
5376 | + qatomic_mb_set(&now_stopping, true); | ||
5377 | while (running > 0) { | ||
5378 | g_usleep(100000); | ||
5379 | } | ||
5380 | @@ -XXX,XX +XXX,XX @@ static void mcs_mutex_lock(void) | ||
5381 | |||
5382 | nodes[id].next = -1; | ||
5383 | nodes[id].locked = 1; | ||
5384 | - prev = atomic_xchg(&mutex_head, id); | ||
5385 | + prev = qatomic_xchg(&mutex_head, id); | ||
5386 | if (prev != -1) { | ||
5387 | - atomic_set(&nodes[prev].next, id); | ||
5388 | + qatomic_set(&nodes[prev].next, id); | ||
5389 | qemu_futex_wait(&nodes[id].locked, 1); | ||
5390 | } | ||
5391 | } | ||
5392 | @@ -XXX,XX +XXX,XX @@ static void mcs_mutex_lock(void) | ||
5393 | static void mcs_mutex_unlock(void) | ||
5394 | { | ||
5395 | int next; | ||
5396 | - if (atomic_read(&nodes[id].next) == -1) { | ||
5397 | - if (atomic_read(&mutex_head) == id && | ||
5398 | - atomic_cmpxchg(&mutex_head, id, -1) == id) { | ||
5399 | + if (qatomic_read(&nodes[id].next) == -1) { | ||
5400 | + if (qatomic_read(&mutex_head) == id && | ||
5401 | + qatomic_cmpxchg(&mutex_head, id, -1) == id) { | ||
5402 | /* Last item in the list, exit. */ | ||
5403 | return; | ||
5404 | } | ||
5405 | - while (atomic_read(&nodes[id].next) == -1) { | ||
5406 | + while (qatomic_read(&nodes[id].next) == -1) { | ||
5407 | /* mcs_mutex_lock did the xchg, but has not updated | ||
5408 | * nodes[prev].next yet. | ||
5409 | */ | ||
5410 | @@ -XXX,XX +XXX,XX @@ static void mcs_mutex_unlock(void) | ||
5411 | } | ||
5412 | |||
5413 | /* Wake up the next in line. */ | ||
5414 | - next = atomic_read(&nodes[id].next); | ||
5415 | + next = qatomic_read(&nodes[id].next); | ||
5416 | nodes[next].locked = 0; | ||
5417 | qemu_futex_wake(&nodes[next].locked, 1); | ||
5418 | } | ||
5419 | |||
5420 | static void test_multi_fair_mutex_entry(void *opaque) | ||
5421 | { | ||
5422 | - while (!atomic_mb_read(&now_stopping)) { | ||
5423 | + while (!qatomic_mb_read(&now_stopping)) { | ||
5424 | mcs_mutex_lock(); | ||
5425 | counter++; | ||
5426 | mcs_mutex_unlock(); | ||
5427 | - atomic_inc(&atomic_counter); | ||
5428 | + qatomic_inc(&atomic_counter); | ||
5429 | } | ||
5430 | - atomic_dec(&running); | ||
5431 | + qatomic_dec(&running); | ||
5432 | } | ||
5433 | |||
5434 | static void test_multi_fair_mutex(int threads, int seconds) | ||
5435 | @@ -XXX,XX +XXX,XX @@ static void test_multi_fair_mutex(int threads, int seconds) | ||
5436 | |||
5437 | g_usleep(seconds * 1000000); | ||
5438 | |||
5439 | - atomic_mb_set(&now_stopping, true); | ||
5440 | + qatomic_mb_set(&now_stopping, true); | ||
5441 | while (running > 0) { | ||
5442 | g_usleep(100000); | ||
5443 | } | ||
5444 | @@ -XXX,XX +XXX,XX @@ static QemuMutex mutex; | ||
5445 | |||
5446 | static void test_multi_mutex_entry(void *opaque) | ||
5447 | { | ||
5448 | - while (!atomic_mb_read(&now_stopping)) { | ||
5449 | + while (!qatomic_mb_read(&now_stopping)) { | ||
5450 | qemu_mutex_lock(&mutex); | ||
5451 | counter++; | ||
5452 | qemu_mutex_unlock(&mutex); | ||
5453 | - atomic_inc(&atomic_counter); | ||
5454 | + qatomic_inc(&atomic_counter); | ||
5455 | } | ||
5456 | - atomic_dec(&running); | ||
5457 | + qatomic_dec(&running); | ||
5458 | } | ||
5459 | |||
5460 | static void test_multi_mutex(int threads, int seconds) | ||
5461 | @@ -XXX,XX +XXX,XX @@ static void test_multi_mutex(int threads, int seconds) | ||
5462 | |||
5463 | g_usleep(seconds * 1000000); | ||
5464 | |||
5465 | - atomic_mb_set(&now_stopping, true); | ||
5466 | + qatomic_mb_set(&now_stopping, true); | ||
5467 | while (running > 0) { | ||
5468 | g_usleep(100000); | ||
5469 | } | ||
5470 | diff --git a/tests/test-logging.c b/tests/test-logging.c | ||
5471 | index XXXXXXX..XXXXXXX 100644 | ||
5472 | --- a/tests/test-logging.c | ||
5473 | +++ b/tests/test-logging.c | ||
5474 | @@ -XXX,XX +XXX,XX @@ static void test_logfile_write(gconstpointer data) | ||
5475 | */ | ||
5476 | qemu_set_log_filename(file_path, &error_abort); | ||
5477 | rcu_read_lock(); | ||
5478 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
5479 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
5480 | orig_fd = logfile->fd; | ||
5481 | g_assert(logfile && logfile->fd); | ||
5482 | fprintf(logfile->fd, "%s 1st write to file\n", __func__); | ||
5483 | @@ -XXX,XX +XXX,XX @@ static void test_logfile_write(gconstpointer data) | ||
5484 | |||
5485 | /* Change the logfile and ensure that the handle is still valid. */ | ||
5486 | qemu_set_log_filename(file_path1, &error_abort); | ||
5487 | - logfile2 = atomic_rcu_read(&qemu_logfile); | ||
5488 | + logfile2 = qatomic_rcu_read(&qemu_logfile); | ||
5489 | g_assert(logfile->fd == orig_fd); | ||
5490 | g_assert(logfile2->fd != logfile->fd); | ||
5491 | fprintf(logfile->fd, "%s 2nd write to file\n", __func__); | ||
5492 | diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c | ||
5493 | index XXXXXXX..XXXXXXX 100644 | ||
5494 | --- a/tests/test-rcu-list.c | ||
5495 | +++ b/tests/test-rcu-list.c | ||
5496 | @@ -XXX,XX +XXX,XX @@ static void reclaim_list_el(struct rcu_head *prcu) | ||
5497 | struct list_element *el = container_of(prcu, struct list_element, rcu); | ||
5498 | g_free(el); | ||
5499 | /* Accessed only from call_rcu thread. */ | ||
5500 | - atomic_set_i64(&n_reclaims, n_reclaims + 1); | ||
5501 | + qatomic_set_i64(&n_reclaims, n_reclaims + 1); | ||
5502 | } | ||
5503 | |||
5504 | #if TEST_LIST_TYPE == 1 | ||
5505 | @@ -XXX,XX +XXX,XX @@ static void *rcu_q_reader(void *arg) | ||
5506 | rcu_register_thread(); | ||
5507 | |||
5508 | *(struct rcu_reader_data **)arg = &rcu_reader; | ||
5509 | - atomic_inc(&nthreadsrunning); | ||
5510 | - while (atomic_read(&goflag) == GOFLAG_INIT) { | ||
5511 | + qatomic_inc(&nthreadsrunning); | ||
5512 | + while (qatomic_read(&goflag) == GOFLAG_INIT) { | ||
5513 | g_usleep(1000); | ||
5514 | } | ||
5515 | |||
5516 | - while (atomic_read(&goflag) == GOFLAG_RUN) { | ||
5517 | + while (qatomic_read(&goflag) == GOFLAG_RUN) { | ||
5518 | rcu_read_lock(); | ||
5519 | TEST_LIST_FOREACH_RCU(el, &Q_list_head, entry) { | ||
5520 | n_reads_local++; | ||
5521 | - if (atomic_read(&goflag) == GOFLAG_STOP) { | ||
5522 | + if (qatomic_read(&goflag) == GOFLAG_STOP) { | ||
5523 | break; | ||
5524 | } | ||
5525 | } | ||
5526 | @@ -XXX,XX +XXX,XX @@ static void *rcu_q_updater(void *arg) | ||
5527 | struct list_element *el, *prev_el; | ||
5528 | |||
5529 | *(struct rcu_reader_data **)arg = &rcu_reader; | ||
5530 | - atomic_inc(&nthreadsrunning); | ||
5531 | - while (atomic_read(&goflag) == GOFLAG_INIT) { | ||
5532 | + qatomic_inc(&nthreadsrunning); | ||
5533 | + while (qatomic_read(&goflag) == GOFLAG_INIT) { | ||
5534 | g_usleep(1000); | ||
5535 | } | ||
5536 | |||
5537 | - while (atomic_read(&goflag) == GOFLAG_RUN) { | ||
5538 | + while (qatomic_read(&goflag) == GOFLAG_RUN) { | ||
5539 | target_el = select_random_el(RCU_Q_LEN); | ||
5540 | j = 0; | ||
5541 | /* FOREACH_RCU could work here but let's use both macros */ | ||
5542 | @@ -XXX,XX +XXX,XX @@ static void *rcu_q_updater(void *arg) | ||
5543 | break; | ||
5544 | } | ||
5545 | } | ||
5546 | - if (atomic_read(&goflag) == GOFLAG_STOP) { | ||
5547 | + if (qatomic_read(&goflag) == GOFLAG_STOP) { | ||
5548 | break; | ||
5549 | } | ||
5550 | target_el = select_random_el(RCU_Q_LEN); | ||
5551 | @@ -XXX,XX +XXX,XX @@ static void *rcu_q_updater(void *arg) | ||
5552 | qemu_mutex_lock(&counts_mutex); | ||
5553 | n_nodes += n_nodes_local; | ||
5554 | n_updates += n_updates_local; | ||
5555 | - atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local); | ||
5556 | + qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local); | ||
5557 | qemu_mutex_unlock(&counts_mutex); | ||
5558 | return NULL; | ||
5559 | } | ||
5560 | @@ -XXX,XX +XXX,XX @@ static void rcu_qtest_init(void) | ||
5561 | static void rcu_qtest_run(int duration, int nreaders) | ||
5562 | { | ||
5563 | int nthreads = nreaders + 1; | ||
5564 | - while (atomic_read(&nthreadsrunning) < nthreads) { | ||
5565 | + while (qatomic_read(&nthreadsrunning) < nthreads) { | ||
5566 | g_usleep(1000); | ||
5567 | } | ||
5568 | |||
5569 | - atomic_set(&goflag, GOFLAG_RUN); | ||
5570 | + qatomic_set(&goflag, GOFLAG_RUN); | ||
5571 | sleep(duration); | ||
5572 | - atomic_set(&goflag, GOFLAG_STOP); | ||
5573 | + qatomic_set(&goflag, GOFLAG_STOP); | ||
5574 | wait_all_threads(); | ||
5575 | } | ||
5576 | |||
5577 | @@ -XXX,XX +XXX,XX @@ static void rcu_qtest(const char *test, int duration, int nreaders) | ||
5578 | n_removed_local++; | ||
5579 | } | ||
5580 | qemu_mutex_lock(&counts_mutex); | ||
5581 | - atomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local); | ||
5582 | + qatomic_set_i64(&n_nodes_removed, n_nodes_removed + n_removed_local); | ||
5583 | qemu_mutex_unlock(&counts_mutex); | ||
5584 | synchronize_rcu(); | ||
5585 | - while (atomic_read_i64(&n_nodes_removed) > atomic_read_i64(&n_reclaims)) { | ||
5586 | + while (qatomic_read_i64(&n_nodes_removed) > | ||
5587 | + qatomic_read_i64(&n_reclaims)) { | ||
5588 | g_usleep(100); | ||
5589 | synchronize_rcu(); | ||
5590 | } | ||
5591 | if (g_test_in_charge) { | ||
5592 | - g_assert_cmpint(atomic_read_i64(&n_nodes_removed), ==, | ||
5593 | - atomic_read_i64(&n_reclaims)); | ||
5594 | + g_assert_cmpint(qatomic_read_i64(&n_nodes_removed), ==, | ||
5595 | + qatomic_read_i64(&n_reclaims)); | ||
5596 | } else { | ||
5597 | printf("%s: %d readers; 1 updater; nodes read: " \ | ||
5598 | "%lld, nodes removed: %"PRIi64"; nodes reclaimed: %"PRIi64"\n", | ||
5599 | test, nthreadsrunning - 1, n_reads, | ||
5600 | - atomic_read_i64(&n_nodes_removed), atomic_read_i64(&n_reclaims)); | ||
5601 | + qatomic_read_i64(&n_nodes_removed), | ||
5602 | + qatomic_read_i64(&n_reclaims)); | ||
5603 | exit(0); | ||
5604 | } | ||
5605 | } | ||
5606 | diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c | ||
5607 | index XXXXXXX..XXXXXXX 100644 | ||
5608 | --- a/tests/test-thread-pool.c | ||
5609 | +++ b/tests/test-thread-pool.c | ||
5610 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
5611 | static int worker_cb(void *opaque) | ||
5612 | { | ||
5613 | WorkerTestData *data = opaque; | ||
5614 | - return atomic_fetch_inc(&data->n); | ||
5615 | + return qatomic_fetch_inc(&data->n); | ||
5616 | } | ||
5617 | |||
5618 | static int long_cb(void *opaque) | ||
5619 | { | ||
5620 | WorkerTestData *data = opaque; | ||
5621 | - if (atomic_cmpxchg(&data->n, 0, 1) == 0) { | ||
5622 | + if (qatomic_cmpxchg(&data->n, 0, 1) == 0) { | ||
5623 | g_usleep(2000000); | ||
5624 | - atomic_or(&data->n, 2); | ||
5625 | + qatomic_or(&data->n, 2); | ||
5626 | } | ||
5627 | return 0; | ||
5628 | } | ||
5629 | @@ -XXX,XX +XXX,XX @@ static void do_test_cancel(bool sync) | ||
5630 | /* Cancel the jobs that haven't been started yet. */ | ||
5631 | num_canceled = 0; | ||
5632 | for (i = 0; i < 100; i++) { | ||
5633 | - if (atomic_cmpxchg(&data[i].n, 0, 4) == 0) { | ||
5634 | + if (qatomic_cmpxchg(&data[i].n, 0, 4) == 0) { | ||
5635 | data[i].ret = -ECANCELED; | ||
5636 | if (sync) { | ||
5637 | bdrv_aio_cancel(data[i].aiocb); | ||
5638 | @@ -XXX,XX +XXX,XX @@ static void do_test_cancel(bool sync) | ||
5639 | g_assert_cmpint(num_canceled, <, 100); | ||
5640 | |||
5641 | for (i = 0; i < 100; i++) { | ||
5642 | - if (data[i].aiocb && atomic_read(&data[i].n) < 4) { | ||
5643 | + if (data[i].aiocb && qatomic_read(&data[i].n) < 4) { | ||
5644 | if (sync) { | ||
5645 | /* Canceling the others will be a blocking operation. */ | ||
5646 | bdrv_aio_cancel(data[i].aiocb); | ||
5647 | diff --git a/util/aio-posix.c b/util/aio-posix.c | ||
5648 | index XXXXXXX..XXXXXXX 100644 | ||
5649 | --- a/util/aio-posix.c | ||
5650 | +++ b/util/aio-posix.c | ||
5651 | @@ -XXX,XX +XXX,XX @@ | ||
5652 | |||
5653 | bool aio_poll_disabled(AioContext *ctx) | ||
5654 | { | ||
5655 | - return atomic_read(&ctx->poll_disable_cnt); | ||
5656 | + return qatomic_read(&ctx->poll_disable_cnt); | ||
5657 | } | ||
5658 | |||
5659 | void aio_add_ready_handler(AioHandlerList *ready_list, | ||
5660 | @@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx, | ||
5661 | * Changing handlers is a rare event, and a little wasted polling until | ||
5662 | * the aio_notify below is not an issue. | ||
5663 | */ | ||
5664 | - atomic_set(&ctx->poll_disable_cnt, | ||
5665 | - atomic_read(&ctx->poll_disable_cnt) + poll_disable_change); | ||
5666 | + qatomic_set(&ctx->poll_disable_cnt, | ||
5667 | + qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change); | ||
5668 | |||
5669 | ctx->fdmon_ops->update(ctx, node, new_node); | ||
5670 | if (node) { | ||
5671 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
5672 | */ | ||
5673 | use_notify_me = timeout != 0; | ||
5674 | if (use_notify_me) { | ||
5675 | - atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2); | ||
5676 | + qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2); | ||
5677 | /* | ||
5678 | * Write ctx->notify_me before reading ctx->notified. Pairs with | ||
5679 | * smp_mb in aio_notify(). | ||
5680 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
5681 | smp_mb(); | ||
5682 | |||
5683 | /* Don't block if aio_notify() was called */ | ||
5684 | - if (atomic_read(&ctx->notified)) { | ||
5685 | + if (qatomic_read(&ctx->notified)) { | ||
5686 | timeout = 0; | ||
5687 | } | ||
5688 | } | ||
5689 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
5690 | |||
5691 | if (use_notify_me) { | ||
5692 | /* Finish the poll before clearing the flag. */ | ||
5693 | - atomic_store_release(&ctx->notify_me, | ||
5694 | - atomic_read(&ctx->notify_me) - 2); | ||
5695 | + qatomic_store_release(&ctx->notify_me, | ||
5696 | + qatomic_read(&ctx->notify_me) - 2); | ||
5697 | } | ||
5698 | |||
5699 | aio_notify_accept(ctx); | ||
5700 | diff --git a/util/aio-wait.c b/util/aio-wait.c | ||
5701 | index XXXXXXX..XXXXXXX 100644 | ||
5702 | --- a/util/aio-wait.c | ||
5703 | +++ b/util/aio-wait.c | ||
5704 | @@ -XXX,XX +XXX,XX @@ static void dummy_bh_cb(void *opaque) | ||
5705 | void aio_wait_kick(void) | ||
5706 | { | ||
5707 | /* The barrier (or an atomic op) is in the caller. */ | ||
5708 | - if (atomic_read(&global_aio_wait.num_waiters)) { | ||
5709 | + if (qatomic_read(&global_aio_wait.num_waiters)) { | ||
5710 | aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL); | ||
5711 | } | ||
5712 | } | ||
5713 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
5714 | index XXXXXXX..XXXXXXX 100644 | ||
5715 | --- a/util/aio-win32.c | ||
5716 | +++ b/util/aio-win32.c | ||
5717 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
5718 | * so disable the optimization now. | ||
5719 | */ | ||
5720 | if (blocking) { | ||
5721 | - atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2); | ||
5722 | + qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2); | ||
5723 | /* | ||
5724 | * Write ctx->notify_me before computing the timeout | ||
5725 | * (reading bottom half flags, etc.). Pairs with | ||
5726 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
5727 | ret = WaitForMultipleObjects(count, events, FALSE, timeout); | ||
5728 | if (blocking) { | ||
5729 | assert(first); | ||
5730 | - atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2); | ||
5731 | + qatomic_store_release(&ctx->notify_me, | ||
5732 | + qatomic_read(&ctx->notify_me) - 2); | ||
5733 | aio_notify_accept(ctx); | ||
5734 | } | ||
5735 | |||
5736 | diff --git a/util/async.c b/util/async.c | ||
5737 | index XXXXXXX..XXXXXXX 100644 | ||
5738 | --- a/util/async.c | ||
5739 | +++ b/util/async.c | ||
5740 | @@ -XXX,XX +XXX,XX @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags) | ||
5741 | unsigned old_flags; | ||
5742 | |||
5743 | /* | ||
5744 | - * The memory barrier implicit in atomic_fetch_or makes sure that: | ||
5745 | + * The memory barrier implicit in qatomic_fetch_or makes sure that: | ||
5746 | * 1. idle & any writes needed by the callback are done before the | ||
5747 | * locations are read in the aio_bh_poll. | ||
5748 | * 2. ctx is loaded before the callback has a chance to execute and bh | ||
5749 | * could be freed. | ||
5750 | */ | ||
5751 | - old_flags = atomic_fetch_or(&bh->flags, BH_PENDING | new_flags); | ||
5752 | + old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags); | ||
5753 | if (!(old_flags & BH_PENDING)) { | ||
5754 | QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next); | ||
5755 | } | ||
5756 | @@ -XXX,XX +XXX,XX @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags) | ||
5757 | QSLIST_REMOVE_HEAD(head, next); | ||
5758 | |||
5759 | /* | ||
5760 | - * The atomic_and is paired with aio_bh_enqueue(). The implicit memory | ||
5761 | + * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory | ||
5762 | * barrier ensures that the callback sees all writes done by the scheduling | ||
5763 | * thread. It also ensures that the scheduling thread sees the cleared | ||
5764 | * flag before bh->cb has run, and thus will call aio_notify again if | ||
5765 | * necessary. | ||
5766 | */ | ||
5767 | - *flags = atomic_fetch_and(&bh->flags, | ||
5768 | + *flags = qatomic_fetch_and(&bh->flags, | ||
5769 | ~(BH_PENDING | BH_SCHEDULED | BH_IDLE)); | ||
5770 | return bh; | ||
5771 | } | ||
5772 | @@ -XXX,XX +XXX,XX @@ void qemu_bh_schedule(QEMUBH *bh) | ||
5773 | */ | ||
5774 | void qemu_bh_cancel(QEMUBH *bh) | ||
5775 | { | ||
5776 | - atomic_and(&bh->flags, ~BH_SCHEDULED); | ||
5777 | + qatomic_and(&bh->flags, ~BH_SCHEDULED); | ||
5778 | } | ||
5779 | |||
5780 | /* This func is async.The bottom half will do the delete action at the finial | ||
5781 | @@ -XXX,XX +XXX,XX @@ aio_ctx_prepare(GSource *source, gint *timeout) | ||
5782 | { | ||
5783 | AioContext *ctx = (AioContext *) source; | ||
5784 | |||
5785 | - atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1); | ||
5786 | + qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1); | ||
5787 | |||
5788 | /* | ||
5789 | * Write ctx->notify_me before computing the timeout | ||
5790 | @@ -XXX,XX +XXX,XX @@ aio_ctx_check(GSource *source) | ||
5791 | BHListSlice *s; | ||
5792 | |||
5793 | /* Finish computing the timeout before clearing the flag. */ | ||
5794 | - atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1); | ||
5795 | + qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1); | ||
5796 | aio_notify_accept(ctx); | ||
5797 | |||
5798 | QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) { | ||
5799 | @@ -XXX,XX +XXX,XX @@ void aio_notify(AioContext *ctx) | ||
5800 | * aio_notify_accept. | ||
5801 | */ | ||
5802 | smp_wmb(); | ||
5803 | - atomic_set(&ctx->notified, true); | ||
5804 | + qatomic_set(&ctx->notified, true); | ||
5805 | |||
5806 | /* | ||
5807 | * Write ctx->notified before reading ctx->notify_me. Pairs | ||
5808 | * with smp_mb in aio_ctx_prepare or aio_poll. | ||
5809 | */ | ||
5810 | smp_mb(); | ||
5811 | - if (atomic_read(&ctx->notify_me)) { | ||
5812 | + if (qatomic_read(&ctx->notify_me)) { | ||
5813 | event_notifier_set(&ctx->notifier); | ||
5814 | } | ||
5815 | } | ||
5816 | |||
5817 | void aio_notify_accept(AioContext *ctx) | ||
5818 | { | ||
5819 | - atomic_set(&ctx->notified, false); | ||
5820 | + qatomic_set(&ctx->notified, false); | ||
5821 | |||
5822 | /* | ||
5823 | * Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb | ||
5824 | @@ -XXX,XX +XXX,XX @@ static bool aio_context_notifier_poll(void *opaque) | ||
5825 | EventNotifier *e = opaque; | ||
5826 | AioContext *ctx = container_of(e, AioContext, notifier); | ||
5827 | |||
5828 | - return atomic_read(&ctx->notified); | ||
5829 | + return qatomic_read(&ctx->notified); | ||
5830 | } | ||
5831 | |||
5832 | static void co_schedule_bh_cb(void *opaque) | ||
5833 | @@ -XXX,XX +XXX,XX @@ static void co_schedule_bh_cb(void *opaque) | ||
5834 | aio_context_acquire(ctx); | ||
5835 | |||
5836 | /* Protected by write barrier in qemu_aio_coroutine_enter */ | ||
5837 | - atomic_set(&co->scheduled, NULL); | ||
5838 | + qatomic_set(&co->scheduled, NULL); | ||
5839 | qemu_aio_coroutine_enter(ctx, co); | ||
5840 | aio_context_release(ctx); | ||
5841 | } | ||
5842 | @@ -XXX,XX +XXX,XX @@ fail: | ||
5843 | void aio_co_schedule(AioContext *ctx, Coroutine *co) | ||
5844 | { | ||
5845 | trace_aio_co_schedule(ctx, co); | ||
5846 | - const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL, | ||
5847 | + const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL, | ||
5848 | __func__); | ||
5849 | |||
5850 | if (scheduled) { | ||
5851 | @@ -XXX,XX +XXX,XX @@ void aio_co_wake(struct Coroutine *co) | ||
5852 | * qemu_coroutine_enter. | ||
5853 | */ | ||
5854 | smp_read_barrier_depends(); | ||
5855 | - ctx = atomic_read(&co->ctx); | ||
5856 | + ctx = qatomic_read(&co->ctx); | ||
5857 | |||
5858 | aio_co_enter(ctx, co); | ||
5859 | } | ||
5860 | diff --git a/util/atomic64.c b/util/atomic64.c | ||
5861 | index XXXXXXX..XXXXXXX 100644 | ||
5862 | --- a/util/atomic64.c | ||
5863 | +++ b/util/atomic64.c | ||
5864 | @@ -XXX,XX +XXX,XX @@ static QemuSpin *addr_to_lock(const void *addr) | ||
5865 | return ret; \ | ||
5866 | } | ||
5867 | |||
5868 | -GEN_READ(atomic_read_i64, int64_t) | ||
5869 | -GEN_READ(atomic_read_u64, uint64_t) | ||
5870 | +GEN_READ(qatomic_read_i64, int64_t) | ||
5871 | +GEN_READ(qatomic_read_u64, uint64_t) | ||
5872 | #undef GEN_READ | ||
5873 | |||
5874 | #define GEN_SET(name, type) \ | ||
5875 | @@ -XXX,XX +XXX,XX @@ GEN_READ(atomic_read_u64, uint64_t) | ||
5876 | qemu_spin_unlock(lock); \ | ||
5877 | } | ||
5878 | |||
5879 | -GEN_SET(atomic_set_i64, int64_t) | ||
5880 | -GEN_SET(atomic_set_u64, uint64_t) | ||
5881 | +GEN_SET(qatomic_set_i64, int64_t) | ||
5882 | +GEN_SET(qatomic_set_u64, uint64_t) | ||
5883 | #undef GEN_SET | ||
5884 | |||
5885 | -void atomic64_init(void) | ||
5886 | +void qatomic64_init(void) | ||
5887 | { | ||
5888 | int i; | ||
5889 | |||
5890 | diff --git a/util/bitmap.c b/util/bitmap.c | ||
5891 | index XXXXXXX..XXXXXXX 100644 | ||
5892 | --- a/util/bitmap.c | ||
5893 | +++ b/util/bitmap.c | ||
5894 | @@ -XXX,XX +XXX,XX @@ void bitmap_set_atomic(unsigned long *map, long start, long nr) | ||
5895 | |||
5896 | /* First word */ | ||
5897 | if (nr - bits_to_set > 0) { | ||
5898 | - atomic_or(p, mask_to_set); | ||
5899 | + qatomic_or(p, mask_to_set); | ||
5900 | nr -= bits_to_set; | ||
5901 | bits_to_set = BITS_PER_LONG; | ||
5902 | mask_to_set = ~0UL; | ||
5903 | @@ -XXX,XX +XXX,XX @@ void bitmap_set_atomic(unsigned long *map, long start, long nr) | ||
5904 | /* Last word */ | ||
5905 | if (nr) { | ||
5906 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); | ||
5907 | - atomic_or(p, mask_to_set); | ||
5908 | + qatomic_or(p, mask_to_set); | ||
5909 | } else { | ||
5910 | - /* If we avoided the full barrier in atomic_or(), issue a | ||
5911 | + /* If we avoided the full barrier in qatomic_or(), issue a | ||
5912 | * barrier to account for the assignments in the while loop. | ||
5913 | */ | ||
5914 | smp_mb(); | ||
5915 | @@ -XXX,XX +XXX,XX @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr) | ||
5916 | |||
5917 | /* First word */ | ||
5918 | if (nr - bits_to_clear > 0) { | ||
5919 | - old_bits = atomic_fetch_and(p, ~mask_to_clear); | ||
5920 | + old_bits = qatomic_fetch_and(p, ~mask_to_clear); | ||
5921 | dirty |= old_bits & mask_to_clear; | ||
5922 | nr -= bits_to_clear; | ||
5923 | bits_to_clear = BITS_PER_LONG; | ||
5924 | @@ -XXX,XX +XXX,XX @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr) | ||
5925 | if (bits_to_clear == BITS_PER_LONG) { | ||
5926 | while (nr >= BITS_PER_LONG) { | ||
5927 | if (*p) { | ||
5928 | - old_bits = atomic_xchg(p, 0); | ||
5929 | + old_bits = qatomic_xchg(p, 0); | ||
5930 | dirty |= old_bits; | ||
5931 | } | ||
5932 | nr -= BITS_PER_LONG; | ||
5933 | @@ -XXX,XX +XXX,XX @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr) | ||
5934 | /* Last word */ | ||
5935 | if (nr) { | ||
5936 | mask_to_clear &= BITMAP_LAST_WORD_MASK(size); | ||
5937 | - old_bits = atomic_fetch_and(p, ~mask_to_clear); | ||
5938 | + old_bits = qatomic_fetch_and(p, ~mask_to_clear); | ||
5939 | dirty |= old_bits & mask_to_clear; | ||
5940 | } else { | ||
5941 | if (!dirty) { | ||
5942 | @@ -XXX,XX +XXX,XX @@ void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, | ||
5943 | long nr) | ||
5944 | { | ||
5945 | while (nr > 0) { | ||
5946 | - *dst = atomic_xchg(src, 0); | ||
5947 | + *dst = qatomic_xchg(src, 0); | ||
5948 | dst++; | ||
5949 | src++; | ||
5950 | nr -= BITS_PER_LONG; | ||
5951 | diff --git a/util/cacheinfo.c b/util/cacheinfo.c | ||
5952 | index XXXXXXX..XXXXXXX 100644 | ||
5953 | --- a/util/cacheinfo.c | ||
5954 | +++ b/util/cacheinfo.c | ||
5955 | @@ -XXX,XX +XXX,XX @@ static void __attribute__((constructor)) init_cache_info(void) | ||
5956 | qemu_dcache_linesize = dsize; | ||
5957 | qemu_dcache_linesize_log = ctz32(dsize); | ||
5958 | |||
5959 | - atomic64_init(); | ||
5960 | + qatomic64_init(); | ||
5961 | } | ||
5962 | diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c | ||
5963 | index XXXXXXX..XXXXXXX 100644 | ||
5964 | --- a/util/fdmon-epoll.c | ||
5965 | +++ b/util/fdmon-epoll.c | ||
5966 | @@ -XXX,XX +XXX,XX @@ static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list, | ||
5967 | struct epoll_event events[128]; | ||
5968 | |||
5969 | /* Fall back while external clients are disabled */ | ||
5970 | - if (atomic_read(&ctx->external_disable_cnt)) { | ||
5971 | + if (qatomic_read(&ctx->external_disable_cnt)) { | ||
5972 | return fdmon_poll_ops.wait(ctx, ready_list, timeout); | ||
5973 | } | ||
5974 | |||
5975 | @@ -XXX,XX +XXX,XX @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) | ||
5976 | } | ||
5977 | |||
5978 | /* Do not upgrade while external clients are disabled */ | ||
5979 | - if (atomic_read(&ctx->external_disable_cnt)) { | ||
5980 | + if (qatomic_read(&ctx->external_disable_cnt)) { | ||
5981 | return false; | ||
5982 | } | ||
5983 | |||
5984 | diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c | ||
5985 | index XXXXXXX..XXXXXXX 100644 | ||
5986 | --- a/util/fdmon-io_uring.c | ||
5987 | +++ b/util/fdmon-io_uring.c | ||
5988 | @@ -XXX,XX +XXX,XX @@ static void enqueue(AioHandlerSList *head, AioHandler *node, unsigned flags) | ||
5989 | { | ||
5990 | unsigned old_flags; | ||
5991 | |||
5992 | - old_flags = atomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags); | ||
5993 | + old_flags = qatomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags); | ||
5994 | if (!(old_flags & FDMON_IO_URING_PENDING)) { | ||
5995 | QSLIST_INSERT_HEAD_ATOMIC(head, node, node_submitted); | ||
5996 | } | ||
5997 | @@ -XXX,XX +XXX,XX @@ static AioHandler *dequeue(AioHandlerSList *head, unsigned *flags) | ||
5998 | * telling process_cqe() to delete the AioHandler when its | ||
5999 | * IORING_OP_POLL_ADD completes. | ||
6000 | */ | ||
6001 | - *flags = atomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING | | ||
6002 | + *flags = qatomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING | | ||
6003 | FDMON_IO_URING_ADD)); | ||
6004 | return node; | ||
6005 | } | ||
6006 | @@ -XXX,XX +XXX,XX @@ static bool process_cqe(AioContext *ctx, | ||
6007 | * with enqueue() here then we can safely clear the FDMON_IO_URING_REMOVE | ||
6008 | * bit before IORING_OP_POLL_REMOVE is submitted. | ||
6009 | */ | ||
6010 | - flags = atomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE); | ||
6011 | + flags = qatomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE); | ||
6012 | if (flags & FDMON_IO_URING_REMOVE) { | ||
6013 | QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted); | ||
6014 | return false; | ||
6015 | @@ -XXX,XX +XXX,XX @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list, | ||
6016 | int ret; | ||
6017 | |||
6018 | /* Fall back while external clients are disabled */ | ||
6019 | - if (atomic_read(&ctx->external_disable_cnt)) { | ||
6020 | + if (qatomic_read(&ctx->external_disable_cnt)) { | ||
6021 | return fdmon_poll_ops.wait(ctx, ready_list, timeout); | ||
6022 | } | ||
6023 | |||
6024 | @@ -XXX,XX +XXX,XX @@ static bool fdmon_io_uring_need_wait(AioContext *ctx) | ||
6025 | } | ||
6026 | |||
6027 | /* Are we falling back to fdmon-poll? */ | ||
6028 | - return atomic_read(&ctx->external_disable_cnt); | ||
6029 | + return qatomic_read(&ctx->external_disable_cnt); | ||
6030 | } | ||
6031 | |||
6032 | static const FDMonOps fdmon_io_uring_ops = { | ||
6033 | @@ -XXX,XX +XXX,XX @@ void fdmon_io_uring_destroy(AioContext *ctx) | ||
6034 | |||
6035 | /* Move handlers due to be removed onto the deleted list */ | ||
6036 | while ((node = QSLIST_FIRST_RCU(&ctx->submit_list))) { | ||
6037 | - unsigned flags = atomic_fetch_and(&node->flags, | ||
6038 | + unsigned flags = qatomic_fetch_and(&node->flags, | ||
6039 | ~(FDMON_IO_URING_PENDING | | ||
6040 | FDMON_IO_URING_ADD | | ||
6041 | FDMON_IO_URING_REMOVE)); | ||
6042 | diff --git a/util/lockcnt.c b/util/lockcnt.c | ||
6043 | index XXXXXXX..XXXXXXX 100644 | ||
6044 | --- a/util/lockcnt.c | ||
6045 | +++ b/util/lockcnt.c | ||
6046 | @@ -XXX,XX +XXX,XX @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val, | ||
6047 | int expected = *val; | ||
6048 | |||
6049 | trace_lockcnt_fast_path_attempt(lockcnt, expected, new_if_free); | ||
6050 | - *val = atomic_cmpxchg(&lockcnt->count, expected, new_if_free); | ||
6051 | + *val = qatomic_cmpxchg(&lockcnt->count, expected, new_if_free); | ||
6052 | if (*val == expected) { | ||
6053 | trace_lockcnt_fast_path_success(lockcnt, expected, new_if_free); | ||
6054 | *val = new_if_free; | ||
6055 | @@ -XXX,XX +XXX,XX @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val, | ||
6056 | int new = expected - QEMU_LOCKCNT_STATE_LOCKED + QEMU_LOCKCNT_STATE_WAITING; | ||
6057 | |||
6058 | trace_lockcnt_futex_wait_prepare(lockcnt, expected, new); | ||
6059 | - *val = atomic_cmpxchg(&lockcnt->count, expected, new); | ||
6060 | + *val = qatomic_cmpxchg(&lockcnt->count, expected, new); | ||
6061 | if (*val == expected) { | ||
6062 | *val = new; | ||
6063 | } | ||
6064 | @@ -XXX,XX +XXX,XX @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val, | ||
6065 | *waited = true; | ||
6066 | trace_lockcnt_futex_wait(lockcnt, *val); | ||
6067 | qemu_futex_wait(&lockcnt->count, *val); | ||
6068 | - *val = atomic_read(&lockcnt->count); | ||
6069 | + *val = qatomic_read(&lockcnt->count); | ||
6070 | trace_lockcnt_futex_wait_resume(lockcnt, *val); | ||
6071 | continue; | ||
6072 | } | ||
6073 | @@ -XXX,XX +XXX,XX @@ static void lockcnt_wake(QemuLockCnt *lockcnt) | ||
6074 | |||
6075 | void qemu_lockcnt_inc(QemuLockCnt *lockcnt) | ||
6076 | { | ||
6077 | - int val = atomic_read(&lockcnt->count); | ||
6078 | + int val = qatomic_read(&lockcnt->count); | ||
6079 | bool waited = false; | ||
6080 | |||
6081 | for (;;) { | ||
6082 | if (val >= QEMU_LOCKCNT_COUNT_STEP) { | ||
6083 | int expected = val; | ||
6084 | - val = atomic_cmpxchg(&lockcnt->count, val, val + QEMU_LOCKCNT_COUNT_STEP); | ||
6085 | + val = qatomic_cmpxchg(&lockcnt->count, val, | ||
6086 | + val + QEMU_LOCKCNT_COUNT_STEP); | ||
6087 | if (val == expected) { | ||
6088 | break; | ||
6089 | } | ||
6090 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_inc(QemuLockCnt *lockcnt) | ||
6091 | |||
6092 | void qemu_lockcnt_dec(QemuLockCnt *lockcnt) | ||
6093 | { | ||
6094 | - atomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP); | ||
6095 | + qatomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP); | ||
6096 | } | ||
6097 | |||
6098 | /* Decrement a counter, and return locked if it is decremented to zero. | ||
6099 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_dec(QemuLockCnt *lockcnt) | ||
6100 | */ | ||
6101 | bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt) | ||
6102 | { | ||
6103 | - int val = atomic_read(&lockcnt->count); | ||
6104 | + int val = qatomic_read(&lockcnt->count); | ||
6105 | int locked_state = QEMU_LOCKCNT_STATE_LOCKED; | ||
6106 | bool waited = false; | ||
6107 | |||
6108 | for (;;) { | ||
6109 | if (val >= 2 * QEMU_LOCKCNT_COUNT_STEP) { | ||
6110 | int expected = val; | ||
6111 | - val = atomic_cmpxchg(&lockcnt->count, val, val - QEMU_LOCKCNT_COUNT_STEP); | ||
6112 | + val = qatomic_cmpxchg(&lockcnt->count, val, | ||
6113 | + val - QEMU_LOCKCNT_COUNT_STEP); | ||
6114 | if (val == expected) { | ||
6115 | break; | ||
6116 | } | ||
6117 | @@ -XXX,XX +XXX,XX @@ bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt) | ||
6118 | */ | ||
6119 | bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt) | ||
6120 | { | ||
6121 | - int val = atomic_read(&lockcnt->count); | ||
6122 | + int val = qatomic_read(&lockcnt->count); | ||
6123 | int locked_state = QEMU_LOCKCNT_STATE_LOCKED; | ||
6124 | bool waited = false; | ||
6125 | |||
6126 | @@ -XXX,XX +XXX,XX @@ bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt) | ||
6127 | |||
6128 | void qemu_lockcnt_lock(QemuLockCnt *lockcnt) | ||
6129 | { | ||
6130 | - int val = atomic_read(&lockcnt->count); | ||
6131 | + int val = qatomic_read(&lockcnt->count); | ||
6132 | int step = QEMU_LOCKCNT_STATE_LOCKED; | ||
6133 | bool waited = false; | ||
6134 | |||
6135 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt) | ||
6136 | { | ||
6137 | int expected, new, val; | ||
6138 | |||
6139 | - val = atomic_read(&lockcnt->count); | ||
6140 | + val = qatomic_read(&lockcnt->count); | ||
6141 | do { | ||
6142 | expected = val; | ||
6143 | new = (val + QEMU_LOCKCNT_COUNT_STEP) & ~QEMU_LOCKCNT_STATE_MASK; | ||
6144 | trace_lockcnt_unlock_attempt(lockcnt, val, new); | ||
6145 | - val = atomic_cmpxchg(&lockcnt->count, val, new); | ||
6146 | + val = qatomic_cmpxchg(&lockcnt->count, val, new); | ||
6147 | } while (val != expected); | ||
6148 | |||
6149 | trace_lockcnt_unlock_success(lockcnt, val, new); | ||
6150 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_unlock(QemuLockCnt *lockcnt) | ||
6151 | { | ||
6152 | int expected, new, val; | ||
6153 | |||
6154 | - val = atomic_read(&lockcnt->count); | ||
6155 | + val = qatomic_read(&lockcnt->count); | ||
6156 | do { | ||
6157 | expected = val; | ||
6158 | new = val & ~QEMU_LOCKCNT_STATE_MASK; | ||
6159 | trace_lockcnt_unlock_attempt(lockcnt, val, new); | ||
6160 | - val = atomic_cmpxchg(&lockcnt->count, val, new); | ||
6161 | + val = qatomic_cmpxchg(&lockcnt->count, val, new); | ||
6162 | } while (val != expected); | ||
6163 | |||
6164 | trace_lockcnt_unlock_success(lockcnt, val, new); | ||
6165 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_unlock(QemuLockCnt *lockcnt) | ||
6166 | |||
6167 | unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt) | ||
6168 | { | ||
6169 | - return atomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT; | ||
6170 | + return qatomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT; | ||
6171 | } | ||
6172 | #else | ||
6173 | void qemu_lockcnt_init(QemuLockCnt *lockcnt) | ||
6174 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_inc(QemuLockCnt *lockcnt) | ||
6175 | { | ||
6176 | int old; | ||
6177 | for (;;) { | ||
6178 | - old = atomic_read(&lockcnt->count); | ||
6179 | + old = qatomic_read(&lockcnt->count); | ||
6180 | if (old == 0) { | ||
6181 | qemu_lockcnt_lock(lockcnt); | ||
6182 | qemu_lockcnt_inc_and_unlock(lockcnt); | ||
6183 | return; | ||
6184 | } else { | ||
6185 | - if (atomic_cmpxchg(&lockcnt->count, old, old + 1) == old) { | ||
6186 | + if (qatomic_cmpxchg(&lockcnt->count, old, old + 1) == old) { | ||
6187 | return; | ||
6188 | } | ||
6189 | } | ||
6190 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_inc(QemuLockCnt *lockcnt) | ||
6191 | |||
6192 | void qemu_lockcnt_dec(QemuLockCnt *lockcnt) | ||
6193 | { | ||
6194 | - atomic_dec(&lockcnt->count); | ||
6195 | + qatomic_dec(&lockcnt->count); | ||
6196 | } | ||
6197 | |||
6198 | /* Decrement a counter, and return locked if it is decremented to zero. | ||
6199 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_dec(QemuLockCnt *lockcnt) | ||
6200 | */ | ||
6201 | bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt) | ||
6202 | { | ||
6203 | - int val = atomic_read(&lockcnt->count); | ||
6204 | + int val = qatomic_read(&lockcnt->count); | ||
6205 | while (val > 1) { | ||
6206 | - int old = atomic_cmpxchg(&lockcnt->count, val, val - 1); | ||
6207 | + int old = qatomic_cmpxchg(&lockcnt->count, val, val - 1); | ||
6208 | if (old != val) { | ||
6209 | val = old; | ||
6210 | continue; | ||
6211 | @@ -XXX,XX +XXX,XX @@ bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt) | ||
6212 | } | ||
6213 | |||
6214 | qemu_lockcnt_lock(lockcnt); | ||
6215 | - if (atomic_fetch_dec(&lockcnt->count) == 1) { | ||
6216 | + if (qatomic_fetch_dec(&lockcnt->count) == 1) { | ||
6217 | return true; | ||
6218 | } | ||
6219 | |||
6220 | @@ -XXX,XX +XXX,XX @@ bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt) | ||
6221 | bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt) | ||
6222 | { | ||
6223 | /* No need for acquire semantics if we return false. */ | ||
6224 | - int val = atomic_read(&lockcnt->count); | ||
6225 | + int val = qatomic_read(&lockcnt->count); | ||
6226 | if (val > 1) { | ||
6227 | return false; | ||
6228 | } | ||
6229 | |||
6230 | qemu_lockcnt_lock(lockcnt); | ||
6231 | - if (atomic_fetch_dec(&lockcnt->count) == 1) { | ||
6232 | + if (qatomic_fetch_dec(&lockcnt->count) == 1) { | ||
6233 | return true; | ||
6234 | } | ||
6235 | |||
6236 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_lock(QemuLockCnt *lockcnt) | ||
6237 | |||
6238 | void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt) | ||
6239 | { | ||
6240 | - atomic_inc(&lockcnt->count); | ||
6241 | + qatomic_inc(&lockcnt->count); | ||
6242 | qemu_mutex_unlock(&lockcnt->mutex); | ||
6243 | } | ||
6244 | |||
6245 | @@ -XXX,XX +XXX,XX @@ void qemu_lockcnt_unlock(QemuLockCnt *lockcnt) | ||
6246 | |||
6247 | unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt) | ||
6248 | { | ||
6249 | - return atomic_read(&lockcnt->count); | ||
6250 | + return qatomic_read(&lockcnt->count); | ||
6251 | } | ||
6252 | #endif | ||
6253 | diff --git a/util/log.c b/util/log.c | ||
6254 | index XXXXXXX..XXXXXXX 100644 | ||
6255 | --- a/util/log.c | ||
6256 | +++ b/util/log.c | ||
6257 | @@ -XXX,XX +XXX,XX @@ int qemu_log(const char *fmt, ...) | ||
6258 | QemuLogFile *logfile; | ||
6259 | |||
6260 | rcu_read_lock(); | ||
6261 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
6262 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
6263 | if (logfile) { | ||
6264 | va_list ap; | ||
6265 | va_start(ap, fmt); | ||
6266 | @@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags) | ||
6267 | QEMU_LOCK_GUARD(&qemu_logfile_mutex); | ||
6268 | if (qemu_logfile && !need_to_open_file) { | ||
6269 | logfile = qemu_logfile; | ||
6270 | - atomic_rcu_set(&qemu_logfile, NULL); | ||
6271 | + qatomic_rcu_set(&qemu_logfile, NULL); | ||
6272 | call_rcu(logfile, qemu_logfile_free, rcu); | ||
6273 | } else if (!qemu_logfile && need_to_open_file) { | ||
6274 | logfile = g_new0(QemuLogFile, 1); | ||
6275 | @@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags) | ||
6276 | #endif | ||
6277 | log_append = 1; | ||
6278 | } | ||
6279 | - atomic_rcu_set(&qemu_logfile, logfile); | ||
6280 | + qatomic_rcu_set(&qemu_logfile, logfile); | ||
6281 | } | ||
6282 | } | ||
6283 | |||
6284 | @@ -XXX,XX +XXX,XX @@ void qemu_log_flush(void) | ||
6285 | QemuLogFile *logfile; | ||
6286 | |||
6287 | rcu_read_lock(); | ||
6288 | - logfile = atomic_rcu_read(&qemu_logfile); | ||
6289 | + logfile = qatomic_rcu_read(&qemu_logfile); | ||
6290 | if (logfile) { | ||
6291 | fflush(logfile->fd); | ||
6292 | } | ||
6293 | @@ -XXX,XX +XXX,XX @@ void qemu_log_close(void) | ||
6294 | logfile = qemu_logfile; | ||
6295 | |||
6296 | if (logfile) { | ||
6297 | - atomic_rcu_set(&qemu_logfile, NULL); | ||
6298 | + qatomic_rcu_set(&qemu_logfile, NULL); | ||
6299 | call_rcu(logfile, qemu_logfile_free, rcu); | ||
6300 | } | ||
6301 | qemu_mutex_unlock(&qemu_logfile_mutex); | ||
6302 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c | ||
6303 | index XXXXXXX..XXXXXXX 100644 | ||
6304 | --- a/util/qemu-coroutine-lock.c | ||
6305 | +++ b/util/qemu-coroutine-lock.c | ||
6306 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx, | ||
6307 | /* This is the "Responsibility Hand-Off" protocol; a lock() picks from | ||
6308 | * a concurrent unlock() the responsibility of waking somebody up. | ||
6309 | */ | ||
6310 | - old_handoff = atomic_mb_read(&mutex->handoff); | ||
6311 | + old_handoff = qatomic_mb_read(&mutex->handoff); | ||
6312 | if (old_handoff && | ||
6313 | has_waiters(mutex) && | ||
6314 | - atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { | ||
6315 | + qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { | ||
6316 | /* There can be no concurrent pops, because there can be only | ||
6317 | * one active handoff at a time. | ||
6318 | */ | ||
6319 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex) | ||
6320 | */ | ||
6321 | i = 0; | ||
6322 | retry_fast_path: | ||
6323 | - waiters = atomic_cmpxchg(&mutex->locked, 0, 1); | ||
6324 | + waiters = qatomic_cmpxchg(&mutex->locked, 0, 1); | ||
6325 | if (waiters != 0) { | ||
6326 | while (waiters == 1 && ++i < 1000) { | ||
6327 | - if (atomic_read(&mutex->ctx) == ctx) { | ||
6328 | + if (qatomic_read(&mutex->ctx) == ctx) { | ||
6329 | break; | ||
6330 | } | ||
6331 | - if (atomic_read(&mutex->locked) == 0) { | ||
6332 | + if (qatomic_read(&mutex->locked) == 0) { | ||
6333 | goto retry_fast_path; | ||
6334 | } | ||
6335 | cpu_relax(); | ||
6336 | } | ||
6337 | - waiters = atomic_fetch_inc(&mutex->locked); | ||
6338 | + waiters = qatomic_fetch_inc(&mutex->locked); | ||
6339 | } | ||
6340 | |||
6341 | if (waiters == 0) { | ||
6342 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) | ||
6343 | mutex->ctx = NULL; | ||
6344 | mutex->holder = NULL; | ||
6345 | self->locks_held--; | ||
6346 | - if (atomic_fetch_dec(&mutex->locked) == 1) { | ||
6347 | + if (qatomic_fetch_dec(&mutex->locked) == 1) { | ||
6348 | /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */ | ||
6349 | return; | ||
6350 | } | ||
6351 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) | ||
6352 | } | ||
6353 | |||
6354 | our_handoff = mutex->sequence; | ||
6355 | - atomic_mb_set(&mutex->handoff, our_handoff); | ||
6356 | + qatomic_mb_set(&mutex->handoff, our_handoff); | ||
6357 | if (!has_waiters(mutex)) { | ||
6358 | /* The concurrent lock has not added itself yet, so it | ||
6359 | * will be able to pick our handoff. | ||
6360 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) | ||
6361 | /* Try to do the handoff protocol ourselves; if somebody else has | ||
6362 | * already taken it, however, we're done and they're responsible. | ||
6363 | */ | ||
6364 | - if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) { | ||
6365 | + if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) { | ||
6366 | break; | ||
6367 | } | ||
6368 | } | ||
6369 | diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c | ||
6370 | index XXXXXXX..XXXXXXX 100644 | ||
6371 | --- a/util/qemu-coroutine-sleep.c | ||
6372 | +++ b/util/qemu-coroutine-sleep.c | ||
6373 | @@ -XXX,XX +XXX,XX @@ struct QemuCoSleepState { | ||
6374 | void qemu_co_sleep_wake(QemuCoSleepState *sleep_state) | ||
6375 | { | ||
6376 | /* Write of schedule protected by barrier write in aio_co_schedule */ | ||
6377 | - const char *scheduled = atomic_cmpxchg(&sleep_state->co->scheduled, | ||
6378 | + const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled, | ||
6379 | qemu_co_sleep_ns__scheduled, NULL); | ||
6380 | |||
6381 | assert(scheduled == qemu_co_sleep_ns__scheduled); | ||
6382 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns, | ||
6383 | .user_state_pointer = sleep_state, | ||
6384 | }; | ||
6385 | |||
6386 | - const char *scheduled = atomic_cmpxchg(&state.co->scheduled, NULL, | ||
6387 | + const char *scheduled = qatomic_cmpxchg(&state.co->scheduled, NULL, | ||
6388 | qemu_co_sleep_ns__scheduled); | ||
6389 | if (scheduled) { | ||
6390 | fprintf(stderr, | ||
6391 | diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c | ||
6392 | index XXXXXXX..XXXXXXX 100644 | ||
6393 | --- a/util/qemu-coroutine.c | ||
6394 | +++ b/util/qemu-coroutine.c | ||
6395 | @@ -XXX,XX +XXX,XX @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque) | ||
6396 | * release_pool_size and the actual size of release_pool. But | ||
6397 | * it is just a heuristic, it does not need to be perfect. | ||
6398 | */ | ||
6399 | - alloc_pool_size = atomic_xchg(&release_pool_size, 0); | ||
6400 | + alloc_pool_size = qatomic_xchg(&release_pool_size, 0); | ||
6401 | QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool); | ||
6402 | co = QSLIST_FIRST(&alloc_pool); | ||
6403 | } | ||
6404 | @@ -XXX,XX +XXX,XX @@ static void coroutine_delete(Coroutine *co) | ||
6405 | if (CONFIG_COROUTINE_POOL) { | ||
6406 | if (release_pool_size < POOL_BATCH_SIZE * 2) { | ||
6407 | QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next); | ||
6408 | - atomic_inc(&release_pool_size); | ||
6409 | + qatomic_inc(&release_pool_size); | ||
6410 | return; | ||
6411 | } | ||
6412 | if (alloc_pool_size < POOL_BATCH_SIZE) { | ||
6413 | @@ -XXX,XX +XXX,XX @@ void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co) | ||
6414 | |||
6415 | /* Cannot rely on the read barrier for to in aio_co_wake(), as there are | ||
6416 | * callers outside of aio_co_wake() */ | ||
6417 | - const char *scheduled = atomic_mb_read(&to->scheduled); | ||
6418 | + const char *scheduled = qatomic_mb_read(&to->scheduled); | ||
6419 | |||
6420 | QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next); | ||
6421 | |||
6422 | diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c | ||
6423 | index XXXXXXX..XXXXXXX 100644 | ||
6424 | --- a/util/qemu-sockets.c | ||
6425 | +++ b/util/qemu-sockets.c | ||
6426 | @@ -XXX,XX +XXX,XX @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr, | ||
6427 | memset(&ai, 0, sizeof(ai)); | ||
6428 | |||
6429 | ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG; | ||
6430 | - if (atomic_read(&useV4Mapped)) { | ||
6431 | + if (qatomic_read(&useV4Mapped)) { | ||
6432 | ai.ai_flags |= AI_V4MAPPED; | ||
6433 | } | ||
6434 | ai.ai_family = inet_ai_family_from_address(saddr, &err); | ||
6435 | @@ -XXX,XX +XXX,XX @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr, | ||
6436 | */ | ||
6437 | if (rc == EAI_BADFLAGS && | ||
6438 | (ai.ai_flags & AI_V4MAPPED)) { | ||
6439 | - atomic_set(&useV4Mapped, 0); | ||
6440 | + qatomic_set(&useV4Mapped, 0); | ||
6441 | ai.ai_flags &= ~AI_V4MAPPED; | ||
6442 | rc = getaddrinfo(saddr->host, saddr->port, &ai, &res); | ||
6443 | } | ||
6444 | diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c | ||
6445 | index XXXXXXX..XXXXXXX 100644 | ||
6446 | --- a/util/qemu-thread-posix.c | ||
6447 | +++ b/util/qemu-thread-posix.c | ||
6448 | @@ -XXX,XX +XXX,XX @@ void qemu_event_set(QemuEvent *ev) | ||
6449 | */ | ||
6450 | assert(ev->initialized); | ||
6451 | smp_mb(); | ||
6452 | - if (atomic_read(&ev->value) != EV_SET) { | ||
6453 | - if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) { | ||
6454 | + if (qatomic_read(&ev->value) != EV_SET) { | ||
6455 | + if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) { | ||
6456 | /* There were waiters, wake them up. */ | ||
6457 | qemu_futex_wake(ev, INT_MAX); | ||
6458 | } | ||
6459 | @@ -XXX,XX +XXX,XX @@ void qemu_event_reset(QemuEvent *ev) | ||
6460 | unsigned value; | ||
6461 | |||
6462 | assert(ev->initialized); | ||
6463 | - value = atomic_read(&ev->value); | ||
6464 | + value = qatomic_read(&ev->value); | ||
6465 | smp_mb_acquire(); | ||
6466 | if (value == EV_SET) { | ||
6467 | /* | ||
6468 | * If there was a concurrent reset (or even reset+wait), | ||
6469 | * do nothing. Otherwise change EV_SET->EV_FREE. | ||
6470 | */ | ||
6471 | - atomic_or(&ev->value, EV_FREE); | ||
6472 | + qatomic_or(&ev->value, EV_FREE); | ||
6473 | } | ||
6474 | } | ||
6475 | |||
6476 | @@ -XXX,XX +XXX,XX @@ void qemu_event_wait(QemuEvent *ev) | ||
6477 | unsigned value; | ||
6478 | |||
6479 | assert(ev->initialized); | ||
6480 | - value = atomic_read(&ev->value); | ||
6481 | + value = qatomic_read(&ev->value); | ||
6482 | smp_mb_acquire(); | ||
6483 | if (value != EV_SET) { | ||
6484 | if (value == EV_FREE) { | ||
6485 | @@ -XXX,XX +XXX,XX @@ void qemu_event_wait(QemuEvent *ev) | ||
6486 | * a concurrent busy->free transition. After the CAS, the | ||
6487 | * event will be either set or busy. | ||
6488 | */ | ||
6489 | - if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { | ||
6490 | + if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { | ||
6491 | return; | ||
6492 | } | ||
6493 | } | ||
6494 | diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c | ||
6495 | index XXXXXXX..XXXXXXX 100644 | ||
6496 | --- a/util/qemu-thread-win32.c | ||
6497 | +++ b/util/qemu-thread-win32.c | ||
6498 | @@ -XXX,XX +XXX,XX @@ void qemu_event_set(QemuEvent *ev) | ||
6499 | * ev->value we need a full memory barrier here. | ||
6500 | */ | ||
6501 | smp_mb(); | ||
6502 | - if (atomic_read(&ev->value) != EV_SET) { | ||
6503 | - if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) { | ||
6504 | + if (qatomic_read(&ev->value) != EV_SET) { | ||
6505 | + if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) { | ||
6506 | /* There were waiters, wake them up. */ | ||
6507 | SetEvent(ev->event); | ||
6508 | } | ||
6509 | @@ -XXX,XX +XXX,XX @@ void qemu_event_reset(QemuEvent *ev) | ||
6510 | unsigned value; | ||
6511 | |||
6512 | assert(ev->initialized); | ||
6513 | - value = atomic_read(&ev->value); | ||
6514 | + value = qatomic_read(&ev->value); | ||
6515 | smp_mb_acquire(); | ||
6516 | if (value == EV_SET) { | ||
6517 | /* If there was a concurrent reset (or even reset+wait), | ||
6518 | * do nothing. Otherwise change EV_SET->EV_FREE. | ||
6519 | */ | ||
6520 | - atomic_or(&ev->value, EV_FREE); | ||
6521 | + qatomic_or(&ev->value, EV_FREE); | ||
6522 | } | ||
6523 | } | ||
6524 | |||
6525 | @@ -XXX,XX +XXX,XX @@ void qemu_event_wait(QemuEvent *ev) | ||
6526 | unsigned value; | ||
6527 | |||
6528 | assert(ev->initialized); | ||
6529 | - value = atomic_read(&ev->value); | ||
6530 | + value = qatomic_read(&ev->value); | ||
6531 | smp_mb_acquire(); | ||
6532 | if (value != EV_SET) { | ||
6533 | if (value == EV_FREE) { | ||
6534 | @@ -XXX,XX +XXX,XX @@ void qemu_event_wait(QemuEvent *ev) | ||
6535 | * because there cannot be a concurrent busy->free transition. | ||
6536 | * After the CAS, the event will be either set or busy. | ||
6537 | */ | ||
6538 | - if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { | ||
6539 | + if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) { | ||
6540 | value = EV_SET; | ||
6541 | } else { | ||
6542 | value = EV_BUSY; | ||
6543 | diff --git a/util/qemu-timer.c b/util/qemu-timer.c | ||
6544 | index XXXXXXX..XXXXXXX 100644 | ||
6545 | --- a/util/qemu-timer.c | ||
6546 | +++ b/util/qemu-timer.c | ||
6547 | @@ -XXX,XX +XXX,XX @@ void qemu_clock_enable(QEMUClockType type, bool enabled) | ||
6548 | |||
6549 | bool timerlist_has_timers(QEMUTimerList *timer_list) | ||
6550 | { | ||
6551 | - return !!atomic_read(&timer_list->active_timers); | ||
6552 | + return !!qatomic_read(&timer_list->active_timers); | ||
6553 | } | ||
6554 | |||
6555 | bool qemu_clock_has_timers(QEMUClockType type) | ||
6556 | @@ -XXX,XX +XXX,XX @@ bool timerlist_expired(QEMUTimerList *timer_list) | ||
6557 | { | ||
6558 | int64_t expire_time; | ||
6559 | |||
6560 | - if (!atomic_read(&timer_list->active_timers)) { | ||
6561 | + if (!qatomic_read(&timer_list->active_timers)) { | ||
6562 | return false; | ||
6563 | } | ||
6564 | |||
6565 | @@ -XXX,XX +XXX,XX @@ int64_t timerlist_deadline_ns(QEMUTimerList *timer_list) | ||
6566 | int64_t delta; | ||
6567 | int64_t expire_time; | ||
6568 | |||
6569 | - if (!atomic_read(&timer_list->active_timers)) { | ||
6570 | + if (!qatomic_read(&timer_list->active_timers)) { | ||
6571 | return -1; | ||
6572 | } | ||
6573 | |||
6574 | @@ -XXX,XX +XXX,XX @@ static void timer_del_locked(QEMUTimerList *timer_list, QEMUTimer *ts) | ||
6575 | if (!t) | ||
6576 | break; | ||
6577 | if (t == ts) { | ||
6578 | - atomic_set(pt, t->next); | ||
6579 | + qatomic_set(pt, t->next); | ||
6580 | break; | ||
6581 | } | ||
6582 | pt = &t->next; | ||
6583 | @@ -XXX,XX +XXX,XX @@ static bool timer_mod_ns_locked(QEMUTimerList *timer_list, | ||
6584 | } | ||
6585 | ts->expire_time = MAX(expire_time, 0); | ||
6586 | ts->next = *pt; | ||
6587 | - atomic_set(pt, ts); | ||
6588 | + qatomic_set(pt, ts); | ||
6589 | |||
6590 | return pt == &timer_list->active_timers; | ||
6591 | } | ||
6592 | @@ -XXX,XX +XXX,XX @@ bool timerlist_run_timers(QEMUTimerList *timer_list) | ||
6593 | QEMUTimerCB *cb; | ||
6594 | void *opaque; | ||
6595 | |||
6596 | - if (!atomic_read(&timer_list->active_timers)) { | ||
6597 | + if (!qatomic_read(&timer_list->active_timers)) { | ||
6598 | return false; | ||
6599 | } | ||
6600 | |||
6601 | diff --git a/util/qht.c b/util/qht.c | ||
6602 | index XXXXXXX..XXXXXXX 100644 | ||
6603 | --- a/util/qht.c | ||
6604 | +++ b/util/qht.c | ||
6605 | @@ -XXX,XX +XXX,XX @@ static inline void qht_unlock(struct qht *ht) | ||
6606 | |||
6607 | /* | ||
6608 | * Note: reading partially-updated pointers in @pointers could lead to | ||
6609 | - * segfaults. We thus access them with atomic_read/set; this guarantees | ||
6610 | + * segfaults. We thus access them with qatomic_read/set; this guarantees | ||
6611 | * that the compiler makes all those accesses atomic. We also need the | ||
6612 | - * volatile-like behavior in atomic_read, since otherwise the compiler | ||
6613 | + * volatile-like behavior in qatomic_read, since otherwise the compiler | ||
6614 | * might refetch the pointer. | ||
6615 | - * atomic_read's are of course not necessary when the bucket lock is held. | ||
6616 | + * qatomic_read's are of course not necessary when the bucket lock is held. | ||
6617 | * | ||
6618 | * If both ht->lock and b->lock are grabbed, ht->lock should always | ||
6619 | * be grabbed first. | ||
6620 | @@ -XXX,XX +XXX,XX @@ void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap) | ||
6621 | { | ||
6622 | struct qht_map *map; | ||
6623 | |||
6624 | - map = atomic_rcu_read(&ht->map); | ||
6625 | + map = qatomic_rcu_read(&ht->map); | ||
6626 | qht_map_lock_buckets(map); | ||
6627 | if (likely(!qht_map_is_stale__locked(ht, map))) { | ||
6628 | *pmap = map; | ||
6629 | @@ -XXX,XX +XXX,XX @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash, | ||
6630 | struct qht_bucket *b; | ||
6631 | struct qht_map *map; | ||
6632 | |||
6633 | - map = atomic_rcu_read(&ht->map); | ||
6634 | + map = qatomic_rcu_read(&ht->map); | ||
6635 | b = qht_map_to_bucket(map, hash); | ||
6636 | |||
6637 | qemu_spin_lock(&b->lock); | ||
6638 | @@ -XXX,XX +XXX,XX @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash, | ||
6639 | |||
6640 | static inline bool qht_map_needs_resize(const struct qht_map *map) | ||
6641 | { | ||
6642 | - return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold; | ||
6643 | + return qatomic_read(&map->n_added_buckets) > | ||
6644 | + map->n_added_buckets_threshold; | ||
6645 | } | ||
6646 | |||
6647 | static inline void qht_chain_destroy(const struct qht_bucket *head) | ||
6648 | @@ -XXX,XX +XXX,XX @@ void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems, | ||
6649 | ht->mode = mode; | ||
6650 | qemu_mutex_init(&ht->lock); | ||
6651 | map = qht_map_create(n_buckets); | ||
6652 | - atomic_rcu_set(&ht->map, map); | ||
6653 | + qatomic_rcu_set(&ht->map, map); | ||
6654 | } | ||
6655 | |||
6656 | /* call only when there are no readers/writers left */ | ||
6657 | @@ -XXX,XX +XXX,XX @@ static void qht_bucket_reset__locked(struct qht_bucket *head) | ||
6658 | if (b->pointers[i] == NULL) { | ||
6659 | goto done; | ||
6660 | } | ||
6661 | - atomic_set(&b->hashes[i], 0); | ||
6662 | - atomic_set(&b->pointers[i], NULL); | ||
6663 | + qatomic_set(&b->hashes[i], 0); | ||
6664 | + qatomic_set(&b->pointers[i], NULL); | ||
6665 | } | ||
6666 | b = b->next; | ||
6667 | } while (b); | ||
6668 | @@ -XXX,XX +XXX,XX @@ void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func, | ||
6669 | |||
6670 | do { | ||
6671 | for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { | ||
6672 | - if (atomic_read(&b->hashes[i]) == hash) { | ||
6673 | + if (qatomic_read(&b->hashes[i]) == hash) { | ||
6674 | /* The pointer is dereferenced before seqlock_read_retry, | ||
6675 | * so (unlike qht_insert__locked) we need to use | ||
6676 | - * atomic_rcu_read here. | ||
6677 | + * qatomic_rcu_read here. | ||
6678 | */ | ||
6679 | - void *p = atomic_rcu_read(&b->pointers[i]); | ||
6680 | + void *p = qatomic_rcu_read(&b->pointers[i]); | ||
6681 | |||
6682 | if (likely(p) && likely(func(p, userp))) { | ||
6683 | return p; | ||
6684 | } | ||
6685 | } | ||
6686 | } | ||
6687 | - b = atomic_rcu_read(&b->next); | ||
6688 | + b = qatomic_rcu_read(&b->next); | ||
6689 | } while (b); | ||
6690 | |||
6691 | return NULL; | ||
6692 | @@ -XXX,XX +XXX,XX @@ void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash, | ||
6693 | unsigned int version; | ||
6694 | void *ret; | ||
6695 | |||
6696 | - map = atomic_rcu_read(&ht->map); | ||
6697 | + map = qatomic_rcu_read(&ht->map); | ||
6698 | b = qht_map_to_bucket(map, hash); | ||
6699 | |||
6700 | version = seqlock_read_begin(&b->sequence); | ||
6701 | @@ -XXX,XX +XXX,XX @@ static void *qht_insert__locked(const struct qht *ht, struct qht_map *map, | ||
6702 | memset(b, 0, sizeof(*b)); | ||
6703 | new = b; | ||
6704 | i = 0; | ||
6705 | - atomic_inc(&map->n_added_buckets); | ||
6706 | + qatomic_inc(&map->n_added_buckets); | ||
6707 | if (unlikely(qht_map_needs_resize(map)) && needs_resize) { | ||
6708 | *needs_resize = true; | ||
6709 | } | ||
6710 | @@ -XXX,XX +XXX,XX @@ static void *qht_insert__locked(const struct qht *ht, struct qht_map *map, | ||
6711 | /* found an empty key: acquire the seqlock and write */ | ||
6712 | seqlock_write_begin(&head->sequence); | ||
6713 | if (new) { | ||
6714 | - atomic_rcu_set(&prev->next, b); | ||
6715 | + qatomic_rcu_set(&prev->next, b); | ||
6716 | } | ||
6717 | /* smp_wmb() implicit in seqlock_write_begin. */ | ||
6718 | - atomic_set(&b->hashes[i], hash); | ||
6719 | - atomic_set(&b->pointers[i], p); | ||
6720 | + qatomic_set(&b->hashes[i], hash); | ||
6721 | + qatomic_set(&b->pointers[i], p); | ||
6722 | seqlock_write_end(&head->sequence); | ||
6723 | return NULL; | ||
6724 | } | ||
6725 | @@ -XXX,XX +XXX,XX @@ qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j) | ||
6726 | qht_debug_assert(to->pointers[i]); | ||
6727 | qht_debug_assert(from->pointers[j]); | ||
6728 | |||
6729 | - atomic_set(&to->hashes[i], from->hashes[j]); | ||
6730 | - atomic_set(&to->pointers[i], from->pointers[j]); | ||
6731 | + qatomic_set(&to->hashes[i], from->hashes[j]); | ||
6732 | + qatomic_set(&to->pointers[i], from->pointers[j]); | ||
6733 | |||
6734 | - atomic_set(&from->hashes[j], 0); | ||
6735 | - atomic_set(&from->pointers[j], NULL); | ||
6736 | + qatomic_set(&from->hashes[j], 0); | ||
6737 | + qatomic_set(&from->pointers[j], NULL); | ||
6738 | } | ||
6739 | |||
6740 | /* | ||
6741 | @@ -XXX,XX +XXX,XX @@ static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos) | ||
6742 | |||
6743 | if (qht_entry_is_last(orig, pos)) { | ||
6744 | orig->hashes[pos] = 0; | ||
6745 | - atomic_set(&orig->pointers[pos], NULL); | ||
6746 | + qatomic_set(&orig->pointers[pos], NULL); | ||
6747 | return; | ||
6748 | } | ||
6749 | do { | ||
6750 | @@ -XXX,XX +XXX,XX @@ do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp) | ||
6751 | { | ||
6752 | struct qht_map *map; | ||
6753 | |||
6754 | - map = atomic_rcu_read(&ht->map); | ||
6755 | + map = qatomic_rcu_read(&ht->map); | ||
6756 | qht_map_lock_buckets(map); | ||
6757 | qht_map_iter__all_locked(map, iter, userp); | ||
6758 | qht_map_unlock_buckets(map); | ||
6759 | @@ -XXX,XX +XXX,XX @@ static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset) | ||
6760 | qht_map_iter__all_locked(old, &iter, &data); | ||
6761 | qht_map_debug__all_locked(new); | ||
6762 | |||
6763 | - atomic_rcu_set(&ht->map, new); | ||
6764 | + qatomic_rcu_set(&ht->map, new); | ||
6765 | qht_map_unlock_buckets(old); | ||
6766 | call_rcu(old, qht_map_destroy, rcu); | ||
6767 | } | ||
6768 | @@ -XXX,XX +XXX,XX @@ void qht_statistics_init(const struct qht *ht, struct qht_stats *stats) | ||
6769 | const struct qht_map *map; | ||
6770 | int i; | ||
6771 | |||
6772 | - map = atomic_rcu_read(&ht->map); | ||
6773 | + map = qatomic_rcu_read(&ht->map); | ||
6774 | |||
6775 | stats->used_head_buckets = 0; | ||
6776 | stats->entries = 0; | ||
6777 | @@ -XXX,XX +XXX,XX @@ void qht_statistics_init(const struct qht *ht, struct qht_stats *stats) | ||
6778 | b = head; | ||
6779 | do { | ||
6780 | for (j = 0; j < QHT_BUCKET_ENTRIES; j++) { | ||
6781 | - if (atomic_read(&b->pointers[j]) == NULL) { | ||
6782 | + if (qatomic_read(&b->pointers[j]) == NULL) { | ||
6783 | break; | ||
6784 | } | ||
6785 | entries++; | ||
6786 | } | ||
6787 | buckets++; | ||
6788 | - b = atomic_rcu_read(&b->next); | ||
6789 | + b = qatomic_rcu_read(&b->next); | ||
6790 | } while (b); | ||
6791 | } while (seqlock_read_retry(&head->sequence, version)); | ||
6792 | |||
6793 | diff --git a/util/qsp.c b/util/qsp.c | ||
6794 | index XXXXXXX..XXXXXXX 100644 | ||
6795 | --- a/util/qsp.c | ||
6796 | +++ b/util/qsp.c | ||
6797 | @@ -XXX,XX +XXX,XX @@ static void qsp_do_init(void) | ||
6798 | |||
6799 | static __attribute__((noinline)) void qsp_init__slowpath(void) | ||
6800 | { | ||
6801 | - if (atomic_cmpxchg(&qsp_initializing, false, true) == false) { | ||
6802 | + if (qatomic_cmpxchg(&qsp_initializing, false, true) == false) { | ||
6803 | qsp_do_init(); | ||
6804 | - atomic_set(&qsp_initialized, true); | ||
6805 | + qatomic_set(&qsp_initialized, true); | ||
6806 | } else { | ||
6807 | - while (!atomic_read(&qsp_initialized)) { | ||
6808 | + while (!qatomic_read(&qsp_initialized)) { | ||
6809 | cpu_relax(); | ||
6810 | } | ||
6811 | } | ||
6812 | @@ -XXX,XX +XXX,XX @@ static __attribute__((noinline)) void qsp_init__slowpath(void) | ||
6813 | /* qsp_init() must be called from _all_ exported functions */ | ||
6814 | static inline void qsp_init(void) | ||
6815 | { | ||
6816 | - if (likely(atomic_read(&qsp_initialized))) { | ||
6817 | + if (likely(qatomic_read(&qsp_initialized))) { | ||
6818 | return; | ||
6819 | } | ||
6820 | qsp_init__slowpath(); | ||
6821 | @@ -XXX,XX +XXX,XX @@ static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line, | ||
6822 | */ | ||
6823 | static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq) | ||
6824 | { | ||
6825 | - atomic_set_u64(&e->ns, e->ns + delta); | ||
6826 | + qatomic_set_u64(&e->ns, e->ns + delta); | ||
6827 | if (acq) { | ||
6828 | - atomic_set_u64(&e->n_acqs, e->n_acqs + 1); | ||
6829 | + qatomic_set_u64(&e->n_acqs, e->n_acqs + 1); | ||
6830 | } | ||
6831 | } | ||
6832 | |||
6833 | @@ -XXX,XX +XXX,XX @@ qsp_cond_timedwait(QemuCond *cond, QemuMutex *mutex, int ms, | ||
6834 | |||
6835 | bool qsp_is_enabled(void) | ||
6836 | { | ||
6837 | - return atomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock; | ||
6838 | + return qatomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock; | ||
6839 | } | ||
6840 | |||
6841 | void qsp_enable(void) | ||
6842 | { | ||
6843 | - atomic_set(&qemu_mutex_lock_func, qsp_mutex_lock); | ||
6844 | - atomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock); | ||
6845 | - atomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock); | ||
6846 | - atomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock); | ||
6847 | - atomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock); | ||
6848 | - atomic_set(&qemu_cond_wait_func, qsp_cond_wait); | ||
6849 | - atomic_set(&qemu_cond_timedwait_func, qsp_cond_timedwait); | ||
6850 | + qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock); | ||
6851 | + qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock); | ||
6852 | + qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock); | ||
6853 | + qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock); | ||
6854 | + qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock); | ||
6855 | + qatomic_set(&qemu_cond_wait_func, qsp_cond_wait); | ||
6856 | + qatomic_set(&qemu_cond_timedwait_func, qsp_cond_timedwait); | ||
6857 | } | ||
6858 | |||
6859 | void qsp_disable(void) | ||
6860 | { | ||
6861 | - atomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl); | ||
6862 | - atomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl); | ||
6863 | - atomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl); | ||
6864 | - atomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl); | ||
6865 | - atomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl); | ||
6866 | - atomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl); | ||
6867 | - atomic_set(&qemu_cond_timedwait_func, qemu_cond_timedwait_impl); | ||
6868 | + qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl); | ||
6869 | + qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl); | ||
6870 | + qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl); | ||
6871 | + qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl); | ||
6872 | + qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl); | ||
6873 | + qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl); | ||
6874 | + qatomic_set(&qemu_cond_timedwait_func, qemu_cond_timedwait_impl); | ||
6875 | } | ||
6876 | |||
6877 | static gint qsp_tree_cmp(gconstpointer ap, gconstpointer bp, gpointer up) | ||
6878 | @@ -XXX,XX +XXX,XX @@ static void qsp_aggregate(void *p, uint32_t h, void *up) | ||
6879 | * The entry is in the global hash table; read from it atomically (as in | ||
6880 | * "read once"). | ||
6881 | */ | ||
6882 | - agg->ns += atomic_read_u64(&e->ns); | ||
6883 | - agg->n_acqs += atomic_read_u64(&e->n_acqs); | ||
6884 | + agg->ns += qatomic_read_u64(&e->ns); | ||
6885 | + agg->n_acqs += qatomic_read_u64(&e->n_acqs); | ||
6886 | } | ||
6887 | |||
6888 | static void qsp_iter_diff(void *p, uint32_t hash, void *htp) | ||
6889 | @@ -XXX,XX +XXX,XX @@ static void qsp_mktree(GTree *tree, bool callsite_coalesce) | ||
6890 | * with the snapshot. | ||
6891 | */ | ||
6892 | WITH_RCU_READ_LOCK_GUARD() { | ||
6893 | - QSPSnapshot *snap = atomic_rcu_read(&qsp_snapshot); | ||
6894 | + QSPSnapshot *snap = qatomic_rcu_read(&qsp_snapshot); | ||
6895 | |||
6896 | /* Aggregate all results from the global hash table into a local one */ | ||
6897 | qht_init(&ht, qsp_entry_no_thread_cmp, QSP_INITIAL_SIZE, | ||
6898 | @@ -XXX,XX +XXX,XX @@ void qsp_reset(void) | ||
6899 | qht_iter(&qsp_ht, qsp_aggregate, &new->ht); | ||
6900 | |||
6901 | /* replace the previous snapshot, if any */ | ||
6902 | - old = atomic_xchg(&qsp_snapshot, new); | ||
6903 | + old = qatomic_xchg(&qsp_snapshot, new); | ||
6904 | if (old) { | ||
6905 | call_rcu(old, qsp_snapshot_destroy, rcu); | ||
6906 | } | ||
6907 | diff --git a/util/rcu.c b/util/rcu.c | ||
6908 | index XXXXXXX..XXXXXXX 100644 | ||
6909 | --- a/util/rcu.c | ||
6910 | +++ b/util/rcu.c | ||
6911 | @@ -XXX,XX +XXX,XX @@ static inline int rcu_gp_ongoing(unsigned long *ctr) | ||
6912 | { | ||
6913 | unsigned long v; | ||
6914 | |||
6915 | - v = atomic_read(ctr); | ||
6916 | + v = qatomic_read(ctr); | ||
6917 | return v && (v != rcu_gp_ctr); | ||
6918 | } | ||
6919 | |||
6920 | @@ -XXX,XX +XXX,XX @@ static void wait_for_readers(void) | ||
6921 | */ | ||
6922 | qemu_event_reset(&rcu_gp_event); | ||
6923 | |||
6924 | - /* Instead of using atomic_mb_set for index->waiting, and | ||
6925 | - * atomic_mb_read for index->ctr, memory barriers are placed | ||
6926 | + /* Instead of using qatomic_mb_set for index->waiting, and | ||
6927 | + * qatomic_mb_read for index->ctr, memory barriers are placed | ||
6928 | * manually since writes to different threads are independent. | ||
6929 | * qemu_event_reset has acquire semantics, so no memory barrier | ||
6930 | * is needed here. | ||
6931 | */ | ||
6932 | QLIST_FOREACH(index, ®istry, node) { | ||
6933 | - atomic_set(&index->waiting, true); | ||
6934 | + qatomic_set(&index->waiting, true); | ||
6935 | } | ||
6936 | |||
6937 | /* Here, order the stores to index->waiting before the loads of | ||
6938 | @@ -XXX,XX +XXX,XX @@ static void wait_for_readers(void) | ||
6939 | /* No need for mb_set here, worst of all we | ||
6940 | * get some extra futex wakeups. | ||
6941 | */ | ||
6942 | - atomic_set(&index->waiting, false); | ||
6943 | + qatomic_set(&index->waiting, false); | ||
6944 | } | ||
6945 | } | ||
6946 | |||
6947 | @@ -XXX,XX +XXX,XX @@ void synchronize_rcu(void) | ||
6948 | |||
6949 | QEMU_LOCK_GUARD(&rcu_registry_lock); | ||
6950 | if (!QLIST_EMPTY(®istry)) { | ||
6951 | - /* In either case, the atomic_mb_set below blocks stores that free | ||
6952 | + /* In either case, the qatomic_mb_set below blocks stores that free | ||
6953 | * old RCU-protected pointers. | ||
6954 | */ | ||
6955 | if (sizeof(rcu_gp_ctr) < 8) { | ||
6956 | @@ -XXX,XX +XXX,XX @@ void synchronize_rcu(void) | ||
6957 | * | ||
6958 | * Switch parity: 0 -> 1, 1 -> 0. | ||
6959 | */ | ||
6960 | - atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); | ||
6961 | + qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); | ||
6962 | wait_for_readers(); | ||
6963 | - atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); | ||
6964 | + qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); | ||
6965 | } else { | ||
6966 | /* Increment current grace period. */ | ||
6967 | - atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR); | ||
6968 | + qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR); | ||
6969 | } | ||
6970 | |||
6971 | wait_for_readers(); | ||
6972 | @@ -XXX,XX +XXX,XX @@ static void enqueue(struct rcu_head *node) | ||
6973 | struct rcu_head **old_tail; | ||
6974 | |||
6975 | node->next = NULL; | ||
6976 | - old_tail = atomic_xchg(&tail, &node->next); | ||
6977 | - atomic_mb_set(old_tail, node); | ||
6978 | + old_tail = qatomic_xchg(&tail, &node->next); | ||
6979 | + qatomic_mb_set(old_tail, node); | ||
6980 | } | ||
6981 | |||
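The two steps above (claim the tail with an atomic exchange, then publish the node through the previous tail's next pointer) form a wait-free multi-producer enqueue. A rough standalone C11 sketch of the same idea, not QEMU's qatomic API (the real code uses the stronger qatomic_mb_set() for the publishing store):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node {
        _Atomic(struct node *) next;
    };

    static struct node dummy;
    /* tail always points at the ->next field of the last node in the list. */
    static _Atomic(_Atomic(struct node *) *) tail = &dummy.next;

    static void enqueue_sketch(struct node *n)
    {
        atomic_store_explicit(&n->next, NULL, memory_order_relaxed);

        /* Step 1: claim the tail slot; later producers now link after n. */
        _Atomic(struct node *) *old_tail =
            atomic_exchange_explicit(&tail, &n->next, memory_order_acq_rel);

        /* Step 2: publish n; the consumer treats a NULL next pointer as
         * "enqueue still in progress" and retries. */
        atomic_store_explicit(old_tail, n, memory_order_release);
    }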
6982 | static struct rcu_head *try_dequeue(void) | ||
6983 | @@ -XXX,XX +XXX,XX @@ retry: | ||
6984 | * The tail, because it is the first step in the enqueuing. | ||
6985 | * It is only the next pointers that might be inconsistent. | ||
6986 | */ | ||
6987 | - if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) { | ||
6988 | + if (head == &dummy && qatomic_mb_read(&tail) == &dummy.next) { | ||
6989 | abort(); | ||
6990 | } | ||
6991 | |||
6992 | @@ -XXX,XX +XXX,XX @@ retry: | ||
6993 | * wrong and we need to wait until its enqueuer finishes the update. | ||
6994 | */ | ||
6995 | node = head; | ||
6996 | - next = atomic_mb_read(&head->next); | ||
6997 | + next = qatomic_mb_read(&head->next); | ||
6998 | if (!next) { | ||
6999 | return NULL; | ||
7000 | } | ||
7001 | @@ -XXX,XX +XXX,XX @@ static void *call_rcu_thread(void *opaque) | ||
7002 | |||
7003 | for (;;) { | ||
7004 | int tries = 0; | ||
7005 | - int n = atomic_read(&rcu_call_count); | ||
7006 | + int n = qatomic_read(&rcu_call_count); | ||
7007 | |||
7008 | /* Heuristically wait for a decent number of callbacks to pile up. | ||
7009 | * Fetch rcu_call_count now, we only must process elements that were | ||
7010 | @@ -XXX,XX +XXX,XX @@ static void *call_rcu_thread(void *opaque) | ||
7011 | g_usleep(10000); | ||
7012 | if (n == 0) { | ||
7013 | qemu_event_reset(&rcu_call_ready_event); | ||
7014 | - n = atomic_read(&rcu_call_count); | ||
7015 | + n = qatomic_read(&rcu_call_count); | ||
7016 | if (n == 0) { | ||
7017 | #if defined(CONFIG_MALLOC_TRIM) | ||
7018 | malloc_trim(4 * 1024 * 1024); | ||
7019 | @@ -XXX,XX +XXX,XX @@ static void *call_rcu_thread(void *opaque) | ||
7020 | qemu_event_wait(&rcu_call_ready_event); | ||
7021 | } | ||
7022 | } | ||
7023 | - n = atomic_read(&rcu_call_count); | ||
7024 | + n = qatomic_read(&rcu_call_count); | ||
7025 | } | ||
7026 | |||
7027 | - atomic_sub(&rcu_call_count, n); | ||
7028 | + qatomic_sub(&rcu_call_count, n); | ||
7029 | synchronize_rcu(); | ||
7030 | qemu_mutex_lock_iothread(); | ||
7031 | while (n > 0) { | ||
7032 | @@ -XXX,XX +XXX,XX @@ void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node)) | ||
7033 | { | ||
7034 | node->func = func; | ||
7035 | enqueue(node); | ||
7036 | - atomic_inc(&rcu_call_count); | ||
7037 | + qatomic_inc(&rcu_call_count); | ||
7038 | qemu_event_set(&rcu_call_ready_event); | ||
7039 | } | ||
7040 | |||
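Callers of call_rcu1() typically embed the rcu_head inside their own structure and free it from the callback once a grace period has elapsed. A hedged usage sketch; the structure, slot and free function below are invented for illustration:

    struct my_data {
        struct rcu_head rcu;
        int payload;
    };

    static void my_data_free(struct rcu_head *node)
    {
        g_free(container_of(node, struct my_data, rcu));
    }

    /* Publish a new version, then defer freeing the old one until all
     * pre-existing readers have left their RCU read-side critical sections. */
    static void replace(struct my_data **slot, struct my_data *new_version)
    {
        struct my_data *old = qatomic_xchg(slot, new_version);
        if (old) {
            call_rcu1(&old->rcu, my_data_free);
        }
    }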
7041 | diff --git a/util/stats64.c b/util/stats64.c | ||
7042 | index XXXXXXX..XXXXXXX 100644 | ||
7043 | --- a/util/stats64.c | ||
7044 | +++ b/util/stats64.c | ||
7045 | @@ -XXX,XX +XXX,XX @@ | ||
7046 | static inline void stat64_rdlock(Stat64 *s) | ||
7047 | { | ||
7048 | /* Keep out incoming writers to avoid them starving us. */ | ||
7049 | - atomic_add(&s->lock, 2); | ||
7050 | + qatomic_add(&s->lock, 2); | ||
7051 | |||
7052 | /* If there is a concurrent writer, wait for it. */ | ||
7053 | - while (atomic_read(&s->lock) & 1) { | ||
7054 | + while (qatomic_read(&s->lock) & 1) { | ||
7055 | cpu_relax(); | ||
7056 | } | ||
7057 | } | ||
7058 | |||
7059 | static inline void stat64_rdunlock(Stat64 *s) | ||
7060 | { | ||
7061 | - atomic_sub(&s->lock, 2); | ||
7062 | + qatomic_sub(&s->lock, 2); | ||
7063 | } | ||
7064 | |||
7065 | static inline bool stat64_wrtrylock(Stat64 *s) | ||
7066 | { | ||
7067 | - return atomic_cmpxchg(&s->lock, 0, 1) == 0; | ||
7068 | + return qatomic_cmpxchg(&s->lock, 0, 1) == 0; | ||
7069 | } | ||
7070 | |||
7071 | static inline void stat64_wrunlock(Stat64 *s) | ||
7072 | { | ||
7073 | - atomic_dec(&s->lock); | ||
7074 | + qatomic_dec(&s->lock); | ||
7075 | } | ||
7076 | |||
7077 | uint64_t stat64_get(const Stat64 *s) | ||
7078 | @@ -XXX,XX +XXX,XX @@ uint64_t stat64_get(const Stat64 *s) | ||
7079 | /* 64-bit writes always take the lock, so we can read in | ||
7080 | * any order. | ||
7081 | */ | ||
7082 | - high = atomic_read(&s->high); | ||
7083 | - low = atomic_read(&s->low); | ||
7084 | + high = qatomic_read(&s->high); | ||
7085 | + low = qatomic_read(&s->low); | ||
7086 | stat64_rdunlock((Stat64 *)s); | ||
7087 | |||
7088 | return ((uint64_t)high << 32) | low; | ||
7089 | @@ -XXX,XX +XXX,XX @@ bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high) | ||
7090 | * order of our update. By updating s->low first, we can check | ||
7091 | * whether we have to carry into s->high. | ||
7092 | */ | ||
7093 | - old = atomic_fetch_add(&s->low, low); | ||
7094 | + old = qatomic_fetch_add(&s->low, low); | ||
7095 | high += (old + low) < old; | ||
7096 | - atomic_add(&s->high, high); | ||
7097 | + qatomic_add(&s->high, high); | ||
7098 | stat64_wrunlock(s); | ||
7099 | return true; | ||
7100 | } | ||
7101 | @@ -XXX,XX +XXX,XX @@ bool stat64_min_slow(Stat64 *s, uint64_t value) | ||
7102 | return false; | ||
7103 | } | ||
7104 | |||
7105 | - high = atomic_read(&s->high); | ||
7106 | - low = atomic_read(&s->low); | ||
7107 | + high = qatomic_read(&s->high); | ||
7108 | + low = qatomic_read(&s->low); | ||
7109 | |||
7110 | orig = ((uint64_t)high << 32) | low; | ||
7111 | if (value < orig) { | ||
7112 | @@ -XXX,XX +XXX,XX @@ bool stat64_min_slow(Stat64 *s, uint64_t value) | ||
7113 | * effect on stat64_min is that the slow path may be triggered | ||
7114 | * unnecessarily. | ||
7115 | */ | ||
7116 | - atomic_set(&s->low, (uint32_t)value); | ||
7117 | + qatomic_set(&s->low, (uint32_t)value); | ||
7118 | smp_wmb(); | ||
7119 | - atomic_set(&s->high, value >> 32); | ||
7120 | + qatomic_set(&s->high, value >> 32); | ||
7121 | } | ||
7122 | stat64_wrunlock(s); | ||
7123 | return true; | ||
7124 | @@ -XXX,XX +XXX,XX @@ bool stat64_max_slow(Stat64 *s, uint64_t value) | ||
7125 | return false; | ||
7126 | } | ||
7127 | |||
7128 | - high = atomic_read(&s->high); | ||
7129 | - low = atomic_read(&s->low); | ||
7130 | + high = qatomic_read(&s->high); | ||
7131 | + low = qatomic_read(&s->low); | ||
7132 | |||
7133 | orig = ((uint64_t)high << 32) | low; | ||
7134 | if (value > orig) { | ||
7135 | @@ -XXX,XX +XXX,XX @@ bool stat64_max_slow(Stat64 *s, uint64_t value) | ||
7136 | * effect on stat64_max is that the slow path may be triggered | ||
7137 | * unnecessarily. | ||
7138 | */ | ||
7139 | - atomic_set(&s->low, (uint32_t)value); | ||
7140 | + qatomic_set(&s->low, (uint32_t)value); | ||
7141 | smp_wmb(); | ||
7142 | - atomic_set(&s->high, value >> 32); | ||
7143 | + qatomic_set(&s->high, value >> 32); | ||
7144 | } | ||
7145 | stat64_wrunlock(s); | ||
7146 | return true; | ||
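The low/high carry handling above can be restated as a standalone C11 sketch (not the Stat64 API; it assumes the writer lock shown above is already held, so readers never see high updated without the matching low):

    #include <stdatomic.h>
    #include <stdint.h>

    struct split64 {
        _Atomic uint32_t low;
        _Atomic uint32_t high;
    };

    static void split64_add_locked(struct split64 *s, uint32_t val)
    {
        uint32_t old = atomic_fetch_add_explicit(&s->low, val,
                                                 memory_order_relaxed);
        /* The 32-bit sum wrapped around iff old + val is smaller than old. */
        uint32_t carry = (uint32_t)(old + val) < old;
        atomic_fetch_add_explicit(&s->high, carry, memory_order_relaxed);
    }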
7147 | diff --git a/docs/devel/atomics.rst b/docs/devel/atomics.rst | ||
7148 | index XXXXXXX..XXXXXXX 100644 | ||
7149 | --- a/docs/devel/atomics.rst | ||
7150 | +++ b/docs/devel/atomics.rst | ||
7151 | @@ -XXX,XX +XXX,XX @@ provides macros that fall in three camps: | ||
7152 | |||
7153 | - compiler barriers: ``barrier()``; | ||
7154 | |||
7155 | -- weak atomic access and manual memory barriers: ``atomic_read()``, | ||
7156 | - ``atomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``, ``smp_mb_acquire()``, | ||
7157 | - ``smp_mb_release()``, ``smp_read_barrier_depends()``; | ||
7158 | +- weak atomic access and manual memory barriers: ``qatomic_read()``, | ||
7159 | + ``qatomic_set()``, ``smp_rmb()``, ``smp_wmb()``, ``smp_mb()``, | ||
7160 | + ``smp_mb_acquire()``, ``smp_mb_release()``, ``smp_read_barrier_depends()``; | ||
7161 | |||
7162 | - sequentially consistent atomic access: everything else. | ||
7163 | |||
7164 | @@ -XXX,XX +XXX,XX @@ in the order specified by its program". | ||
7165 | ``qemu/atomic.h`` provides the following set of atomic read-modify-write | ||
7166 | operations:: | ||
7167 | |||
7168 | - void atomic_inc(ptr) | ||
7169 | - void atomic_dec(ptr) | ||
7170 | - void atomic_add(ptr, val) | ||
7171 | - void atomic_sub(ptr, val) | ||
7172 | - void atomic_and(ptr, val) | ||
7173 | - void atomic_or(ptr, val) | ||
7174 | + void qatomic_inc(ptr) | ||
7175 | + void qatomic_dec(ptr) | ||
7176 | + void qatomic_add(ptr, val) | ||
7177 | + void qatomic_sub(ptr, val) | ||
7178 | + void qatomic_and(ptr, val) | ||
7179 | + void qatomic_or(ptr, val) | ||
7180 | |||
7181 | - typeof(*ptr) atomic_fetch_inc(ptr) | ||
7182 | - typeof(*ptr) atomic_fetch_dec(ptr) | ||
7183 | - typeof(*ptr) atomic_fetch_add(ptr, val) | ||
7184 | - typeof(*ptr) atomic_fetch_sub(ptr, val) | ||
7185 | - typeof(*ptr) atomic_fetch_and(ptr, val) | ||
7186 | - typeof(*ptr) atomic_fetch_or(ptr, val) | ||
7187 | - typeof(*ptr) atomic_fetch_xor(ptr, val) | ||
7188 | - typeof(*ptr) atomic_fetch_inc_nonzero(ptr) | ||
7189 | - typeof(*ptr) atomic_xchg(ptr, val) | ||
7190 | - typeof(*ptr) atomic_cmpxchg(ptr, old, new) | ||
7191 | + typeof(*ptr) qatomic_fetch_inc(ptr) | ||
7192 | + typeof(*ptr) qatomic_fetch_dec(ptr) | ||
7193 | + typeof(*ptr) qatomic_fetch_add(ptr, val) | ||
7194 | + typeof(*ptr) qatomic_fetch_sub(ptr, val) | ||
7195 | + typeof(*ptr) qatomic_fetch_and(ptr, val) | ||
7196 | + typeof(*ptr) qatomic_fetch_or(ptr, val) | ||
7197 | + typeof(*ptr) qatomic_fetch_xor(ptr, val) | ||
7198 | + typeof(*ptr) qatomic_fetch_inc_nonzero(ptr) | ||
7199 | + typeof(*ptr) qatomic_xchg(ptr, val) | ||
7200 | + typeof(*ptr) qatomic_cmpxchg(ptr, old, new) | ||
7201 | |||
7202 | all of which return the old value of ``*ptr``. These operations are | ||
7203 | polymorphic; they operate on any type that is as wide as a pointer or | ||
7204 | @@ -XXX,XX +XXX,XX @@ smaller. | ||
7205 | |||
7206 | Similar operations return the new value of ``*ptr``:: | ||
7207 | |||
7208 | - typeof(*ptr) atomic_inc_fetch(ptr) | ||
7209 | - typeof(*ptr) atomic_dec_fetch(ptr) | ||
7210 | - typeof(*ptr) atomic_add_fetch(ptr, val) | ||
7211 | - typeof(*ptr) atomic_sub_fetch(ptr, val) | ||
7212 | - typeof(*ptr) atomic_and_fetch(ptr, val) | ||
7213 | - typeof(*ptr) atomic_or_fetch(ptr, val) | ||
7214 | - typeof(*ptr) atomic_xor_fetch(ptr, val) | ||
7215 | + typeof(*ptr) qatomic_inc_fetch(ptr) | ||
7216 | + typeof(*ptr) qatomic_dec_fetch(ptr) | ||
7217 | + typeof(*ptr) qatomic_add_fetch(ptr, val) | ||
7218 | + typeof(*ptr) qatomic_sub_fetch(ptr, val) | ||
7219 | + typeof(*ptr) qatomic_and_fetch(ptr, val) | ||
7220 | + typeof(*ptr) qatomic_or_fetch(ptr, val) | ||
7221 | + typeof(*ptr) qatomic_xor_fetch(ptr, val) | ||
7222 | |||
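The difference between the two families is simply which value they return; here is a minimal standalone illustration using plain <stdatomic.h> rather than the qatomic wrappers:

    #include <stdatomic.h>
    #include <assert.h>

    int main(void)
    {
        _Atomic int counter = 5;

        int old = atomic_fetch_add(&counter, 3);   /* like qatomic_fetch_add: old value */
        assert(old == 5);

        int updated = atomic_load(&counter);       /* what qatomic_add_fetch would return */
        assert(updated == 8);
        return 0;
    }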
7223 | ``qemu/atomic.h`` also provides loads and stores that cannot be reordered | ||
7224 | with each other:: | ||
7225 | |||
7226 | - typeof(*ptr) atomic_mb_read(ptr) | ||
7227 | - void atomic_mb_set(ptr, val) | ||
7228 | + typeof(*ptr) qatomic_mb_read(ptr) | ||
7229 | + void qatomic_mb_set(ptr, val) | ||
7230 | |||
7231 | However these do not provide sequential consistency and, in particular, | ||
7232 | they do not participate in the total ordering enforced by | ||
7233 | @@ -XXX,XX +XXX,XX @@ easiest to hardest): | ||
7234 | |||
7235 | - lightweight synchronization primitives such as ``QemuEvent`` | ||
7236 | |||
7237 | -- RCU operations (``atomic_rcu_read``, ``atomic_rcu_set``) when publishing | ||
7238 | +- RCU operations (``qatomic_rcu_read``, ``qatomic_rcu_set``) when publishing | ||
7239 | or accessing a new version of a data structure | ||
7240 | |||
7241 | -- other atomic accesses: ``atomic_read`` and ``atomic_load_acquire`` for | ||
7242 | - loads, ``atomic_set`` and ``atomic_store_release`` for stores, ``smp_mb`` | ||
7243 | +- other atomic accesses: ``qatomic_read`` and ``qatomic_load_acquire`` for | ||
7244 | + loads, ``qatomic_set`` and ``qatomic_store_release`` for stores, ``smp_mb`` | ||
7245 | to forbid reordering subsequent loads before a store. | ||
7246 | |||
7247 | |||
7248 | @@ -XXX,XX +XXX,XX @@ The only guarantees that you can rely upon in this case are: | ||
7249 | |||
7250 | When using this model, variables are accessed with: | ||
7251 | |||
7252 | -- ``atomic_read()`` and ``atomic_set()``; these prevent the compiler from | ||
7253 | +- ``qatomic_read()`` and ``qatomic_set()``; these prevent the compiler from | ||
7254 | optimizing accesses out of existence and creating unsolicited | ||
7255 | accesses, but do not otherwise impose any ordering on loads and | ||
7256 | stores: both the compiler and the processor are free to reorder | ||
7257 | them. | ||
7258 | |||
7259 | -- ``atomic_load_acquire()``, which guarantees the LOAD to appear to | ||
7260 | +- ``qatomic_load_acquire()``, which guarantees the LOAD to appear to | ||
7261 | happen, with respect to the other components of the system, | ||
7262 | before all the LOAD or STORE operations specified afterwards. | ||
7263 | - Operations coming before ``atomic_load_acquire()`` can still be | ||
7264 | + Operations coming before ``qatomic_load_acquire()`` can still be | ||
7265 | reordered after it. | ||
7266 | |||
7267 | -- ``atomic_store_release()``, which guarantees the STORE to appear to | ||
7268 | +- ``qatomic_store_release()``, which guarantees the STORE to appear to | ||
7269 | happen, with respect to the other components of the system, | ||
7270 | after all the LOAD or STORE operations specified before. | ||
7271 | - Operations coming after ``atomic_store_release()`` can still be | ||
7272 | + Operations coming after ``qatomic_store_release()`` can still be | ||
7273 | reordered before it. | ||
7274 | |||
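A standalone C11 sketch of this single-writer/single-reader pattern; inside QEMU, qatomic_store_release() and qatomic_load_acquire() provide the same orderings:

    #include <stdatomic.h>
    #include <stdbool.h>

    static int payload;          /* plain data, written before it is published */
    static _Atomic bool ready;

    static void producer(void)
    {
        payload = 42;
        atomic_store_explicit(&ready, true, memory_order_release);
    }

    static int consumer(void)
    {
        while (!atomic_load_explicit(&ready, memory_order_acquire)) {
            /* spin until the flag is published */
        }
        return payload;          /* guaranteed to observe 42 */
    }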
7275 | Restrictions to the ordering of accesses can also be specified | ||
7276 | @@ -XXX,XX +XXX,XX @@ They come in six kinds: | ||
7277 | dependency and a full read barrier or better is required. | ||
7278 | |||
7279 | |||
7280 | -Memory barriers and ``atomic_load_acquire``/``atomic_store_release`` are | ||
7281 | +Memory barriers and ``qatomic_load_acquire``/``qatomic_store_release`` are | ||
7282 | mostly used when a data structure has one thread that is always a writer | ||
7283 | and one thread that is always a reader: | ||
7284 | |||
7285 | @@ -XXX,XX +XXX,XX @@ and one thread that is always a reader: | ||
7286 | +==================================+==================================+ | ||
7287 | | :: | :: | | ||
7288 | | | | | ||
7289 | - | atomic_store_release(&a, x); | y = atomic_load_acquire(&b); | | ||
7290 | - | atomic_store_release(&b, y); | x = atomic_load_acquire(&a); | | ||
7291 | + | qatomic_store_release(&a, x); | y = qatomic_load_acquire(&b); | | ||
7292 | + | qatomic_store_release(&b, y); | x = qatomic_load_acquire(&a); | | ||
7293 | +----------------------------------+----------------------------------+ | ||
7294 | |||
7295 | In this case, correctness is easy to check for using the "pairing" | ||
7296 | @@ -XXX,XX +XXX,XX @@ outside a loop. For example: | ||
7297 | | | | | ||
7298 | | n = 0; | n = 0; | | ||
7299 | | for (i = 0; i < 10; i++) | for (i = 0; i < 10; i++) | | ||
7300 | - | n += atomic_load_acquire(&a[i]); | n += atomic_read(&a[i]); | | ||
7301 | + | n += qatomic_load_acquire(&a[i]); | n += qatomic_read(&a[i]); | | ||
7302 | | | smp_mb_acquire(); | | ||
7303 | +------------------------------------------+----------------------------------+ | ||
7304 | | :: | :: | | ||
7305 | | | | | ||
7306 | | | smp_mb_release(); | | ||
7307 | | for (i = 0; i < 10; i++) | for (i = 0; i < 10; i++) | | ||
7308 | - | atomic_store_release(&a[i], false); | atomic_set(&a[i], false); | | ||
7309 | + | qatomic_store_release(&a[i], false); | qatomic_set(&a[i], false); | | ||
7310 | +------------------------------------------+----------------------------------+ | ||
7311 | |||
7312 | Splitting a loop can also be useful to reduce the number of barriers: | ||
7313 | @@ -XXX,XX +XXX,XX @@ Splitting a loop can also be useful to reduce the number of barriers: | ||
7314 | | | | | ||
7315 | | n = 0; | smp_mb_release(); | | ||
7316 | | for (i = 0; i < 10; i++) { | for (i = 0; i < 10; i++) | | ||
7317 | - | atomic_store_release(&a[i], false); | atomic_set(&a[i], false); | | ||
7318 | + | qatomic_store_release(&a[i], false); | qatomic_set(&a[i], false); | | ||
7319 | | smp_mb(); | smp_mb(); | |
7320 | - | n += atomic_read(&b[i]); | n = 0; | | ||
7321 | + | n += qatomic_read(&b[i]); | n = 0; | | ||
7322 | | } | for (i = 0; i < 10; i++) | | ||
7323 | - | | n += atomic_read(&b[i]); | | ||
7324 | + | | n += qatomic_read(&b[i]); | | ||
7325 | +------------------------------------------+----------------------------------+ | ||
7326 | |||
7327 | In this case, a ``smp_mb_release()`` is also replaced with a (possibly cheaper, and clearer | ||
7328 | @@ -XXX,XX +XXX,XX @@ as well) ``smp_wmb()``: | ||
7329 | | | | | ||
7330 | | | smp_mb_release(); | | ||
7331 | | for (i = 0; i < 10; i++) { | for (i = 0; i < 10; i++) | | ||
7332 | - | atomic_store_release(&a[i], false); | atomic_set(&a[i], false); | | ||
7333 | - | atomic_store_release(&b[i], false); | smp_wmb(); | |
7334 | + | qatomic_store_release(&a[i], false); | qatomic_set(&a[i], false); | | ||
7335 | + | qatomic_store_release(&b[i], false); | smp_wmb(); | |
7336 | | } | for (i = 0; i < 10; i++) | | ||
7337 | - | | atomic_set(&b[i], false); | | ||
7338 | + | | qatomic_set(&b[i], false); | | ||
7339 | +------------------------------------------+----------------------------------+ | ||
7340 | |||
7341 | |||
7342 | @@ -XXX,XX +XXX,XX @@ as well) ``smp_wmb()``: | ||
7343 | Acquire/release pairing and the *synchronizes-with* relation | ||
7344 | ------------------------------------------------------------ | ||
7345 | |||
7346 | -Atomic operations other than ``atomic_set()`` and ``atomic_read()`` have | ||
7347 | +Atomic operations other than ``qatomic_set()`` and ``qatomic_read()`` have | ||
7348 | either *acquire* or *release* semantics [#rmw]_. This has two effects: | ||
7349 | |||
7350 | .. [#rmw] Read-modify-write operations can have both---acquire applies to the | ||
7351 | @@ -XXX,XX +XXX,XX @@ thread 2 is relying on the *synchronizes-with* relation between ``pthread_exit`` | ||
7352 | |||
7353 | Synchronization between threads basically descends from this pairing of | ||
7354 | a release operation and an acquire operation. Therefore, atomic operations | ||
7355 | -other than ``atomic_set()`` and ``atomic_read()`` will almost always be | ||
7356 | +other than ``qatomic_set()`` and ``qatomic_read()`` will almost always be | ||
7357 | paired with another operation of the opposite kind: an acquire operation | ||
7358 | will pair with a release operation and vice versa. This rule of thumb is | ||
7359 | extremely useful; in the case of QEMU, however, note that the other | ||
7360 | operation may actually be in a driver that runs in the guest! | ||
7361 | |||
7362 | ``smp_read_barrier_depends()``, ``smp_rmb()``, ``smp_mb_acquire()``, | ||
7363 | -``atomic_load_acquire()`` and ``atomic_rcu_read()`` all count | ||
7364 | +``qatomic_load_acquire()`` and ``qatomic_rcu_read()`` all count | ||
7365 | as acquire operations. ``smp_wmb()``, ``smp_mb_release()``, | ||
7366 | -``atomic_store_release()`` and ``atomic_rcu_set()`` all count as release | ||
7367 | +``qatomic_store_release()`` and ``qatomic_rcu_set()`` all count as release | ||
7368 | operations. ``smp_mb()`` counts as both acquire and release, therefore | ||
7369 | it can pair with any other atomic operation. Here is an example: | ||
7370 | |||
7371 | @@ -XXX,XX +XXX,XX @@ it can pair with any other atomic operation. Here is an example: | ||
7372 | +======================+==============================+ | ||
7373 | | :: | :: | | ||
7374 | | | | | ||
7375 | - | atomic_set(&a, 1); | | | ||
7376 | + | qatomic_set(&a, 1);| | | ||
7377 | | smp_wmb(); | | | ||
7378 | - | atomic_set(&b, 2); | x = atomic_read(&b); | | ||
7379 | + | qatomic_set(&b, 2);| x = qatomic_read(&b); | | ||
7380 | | | smp_rmb(); | | ||
7381 | - | | y = atomic_read(&a); | | ||
7382 | + | | y = qatomic_read(&a); | | ||
7383 | +----------------------+------------------------------+ | ||
7384 | |||
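The same message-passing idea can be written in standalone C11 with explicit fences, which is roughly what smp_wmb() and smp_rmb() map to (a sketch, not QEMU code):

    #include <stdatomic.h>

    static _Atomic int a, b;

    static void writer(void)
    {
        atomic_store_explicit(&a, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&b, 2, memory_order_relaxed);
    }

    static void reader(int *x, int *y)
    {
        *x = atomic_load_explicit(&b, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
        *y = atomic_load_explicit(&a, memory_order_relaxed);
        /* If *x == 2, then *y is guaranteed to be 1. */
    }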
7385 | Note that a load-store pair only counts if the two operations access the | ||
7386 | @@ -XXX,XX +XXX,XX @@ correct synchronization: | ||
7387 | +================================+================================+ | ||
7388 | | :: | :: | | ||
7389 | | | | | ||
7390 | - | atomic_set(&a, 1); | | | ||
7391 | - | atomic_store_release(&b, 2); | x = atomic_load_acquire(&b); | | ||
7392 | - | | y = atomic_read(&a); | | ||
7393 | + | qatomic_set(&a, 1); | | | ||
7394 | + | qatomic_store_release(&b, 2);| x = qatomic_load_acquire(&b);| | ||
7395 | + | | y = qatomic_read(&a); | | ||
7396 | +--------------------------------+--------------------------------+ | ||
7397 | |||
7398 | Acquire and release semantics of higher-level primitives can also be | ||
7399 | @@ -XXX,XX +XXX,XX @@ cannot be a data race: | ||
7400 | | smp_wmb(); | | | ||
7401 | | x->i = 2; | | | ||
7402 | | smp_wmb(); | | | ||
7403 | - | atomic_set(&a, x); | x = atomic_read(&a); | | ||
7404 | + | qatomic_set(&a, x);| x = qatomic_read(&a); | | ||
7405 | | | smp_read_barrier_depends(); | | ||
7406 | | | y = x->i; | | ||
7407 | | | smp_read_barrier_depends(); | | ||
7408 | @@ -XXX,XX +XXX,XX @@ and memory barriers, and the equivalents in QEMU: | ||
7409 | at all. Linux 4.1 updated them to implement volatile | ||
7410 | semantics via ``ACCESS_ONCE`` (or the more recent ``READ``/``WRITE_ONCE``). | ||
7411 | |||
7412 | - QEMU's ``atomic_read`` and ``atomic_set`` implement C11 atomic relaxed | ||
7413 | + QEMU's ``qatomic_read`` and ``qatomic_set`` implement C11 atomic relaxed | ||
7414 | semantics if the compiler supports it, and volatile semantics otherwise. | ||
7415 | Both semantics prevent the compiler from doing certain transformations; | ||
7416 | the difference is that atomic accesses are guaranteed to be atomic, | ||
7417 | @@ -XXX,XX +XXX,XX @@ and memory barriers, and the equivalents in QEMU: | ||
7418 | since we assume the variables passed are machine-word sized and | ||
7419 | properly aligned. | ||
7420 | |||
7421 | - No barriers are implied by ``atomic_read`` and ``atomic_set`` in either Linux | ||
7422 | - or QEMU. | ||
7423 | + No barriers are implied by ``qatomic_read`` and ``qatomic_set`` in either | ||
7424 | + Linux or QEMU. | ||
7425 | |||
7426 | - atomic read-modify-write operations in Linux are of three kinds: | ||
7427 | |||
7428 | @@ -XXX,XX +XXX,XX @@ and memory barriers, and the equivalents in QEMU: | ||
7429 | a different set of memory barriers; in QEMU, all of them enforce | ||
7430 | sequential consistency. | ||
7431 | |||
7432 | -- in QEMU, ``atomic_read()`` and ``atomic_set()`` do not participate in | ||
7433 | +- in QEMU, ``qatomic_read()`` and ``qatomic_set()`` do not participate in | ||
7434 | the total ordering enforced by sequentially-consistent operations. | ||
7435 | This is because QEMU uses the C11 memory model. The following example | ||
7436 | is correct in Linux but not in QEMU: | ||
7437 | @@ -XXX,XX +XXX,XX @@ and memory barriers, and the equivalents in QEMU: | ||
7438 | +==================================+================================+ | ||
7439 | | :: | :: | | ||
7440 | | | | | ||
7441 | - | a = atomic_fetch_add(&x, 2); | a = atomic_fetch_add(&x, 2); | | ||
7442 | - | b = READ_ONCE(&y); | b = atomic_read(&y); | | ||
7443 | + | a = atomic_fetch_add(&x, 2); | a = qatomic_fetch_add(&x, 2);| | ||
7444 | + | b = READ_ONCE(&y); | b = qatomic_read(&y); | | ||
7445 | +----------------------------------+--------------------------------+ | ||
7446 | |||
7447 | because the read of ``y`` can be moved (by either the processor or the | ||
7448 | @@ -XXX,XX +XXX,XX @@ and memory barriers, and the equivalents in QEMU: | ||
7449 | +================================+ | ||
7450 | | :: | | ||
7451 | | | | ||
7452 | - | a = atomic_read(&x); | | ||
7453 | - | atomic_set(&x, a + 2); | | ||
7454 | + | a = qatomic_read(&x); | | ||
7455 | + | qatomic_set(&x, a + 2); | | ||
7456 | | smp_mb(); | | ||
7457 | - | b = atomic_read(&y); | | ||
7458 | + | b = qatomic_read(&y); | | ||
7459 | +--------------------------------+ | ||
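In standalone C11 terms, the corrected sequence above looks roughly like this sketch, with smp_mb() corresponding to the sequentially consistent fence:

    #include <stdatomic.h>

    static _Atomic int x, y;

    static int thread1(void)
    {
        int a = atomic_load_explicit(&x, memory_order_relaxed);  /* qatomic_read(&x) */
        atomic_store_explicit(&x, a + 2, memory_order_relaxed);  /* qatomic_set(&x, a + 2) */
        atomic_thread_fence(memory_order_seq_cst);               /* smp_mb() */
        /* The fence keeps this load from moving before the update of x. */
        return atomic_load_explicit(&y, memory_order_relaxed);   /* b = qatomic_read(&y) */
    }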
7460 | |||
7461 | Sources | ||
7462 | diff --git a/scripts/kernel-doc b/scripts/kernel-doc | ||
7463 | index XXXXXXX..XXXXXXX 100755 | ||
7464 | --- a/scripts/kernel-doc | ||
7465 | +++ b/scripts/kernel-doc | ||
7466 | @@ -XXX,XX +XXX,XX @@ sub dump_function($$) { | ||
7467 | # If you mess with these regexps, it's a good idea to check that | ||
7468 | # the following functions' documentation still comes out right: | ||
7469 | # - parport_register_device (function pointer parameters) | ||
7470 | - # - atomic_set (macro) | ||
7471 | + # - qatomic_set (macro) | ||
7472 | # - pci_match_device, __copy_to_user (long return type) | ||
7473 | |||
7474 | if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) { | ||
7475 | diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc | ||
7476 | index XXXXXXX..XXXXXXX 100644 | ||
7477 | --- a/tcg/aarch64/tcg-target.c.inc | ||
7478 | +++ b/tcg/aarch64/tcg-target.c.inc | ||
7479 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, | ||
7480 | i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd; | ||
7481 | } | ||
7482 | pair = (uint64_t)i2 << 32 | i1; | ||
7483 | - atomic_set((uint64_t *)jmp_addr, pair); | ||
7484 | + qatomic_set((uint64_t *)jmp_addr, pair); | ||
7485 | flush_icache_range(jmp_addr, jmp_addr + 8); | ||
7486 | } | ||
7487 | |||
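These paths use a single pointer-width qatomic_set() so that a vCPU thread concurrently executing the translation block observes either the complete old or the complete new instruction pair, never a torn half-update. A hedged standalone sketch of the idea; the function and parameter names are illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    static void patch_insn_pair(_Atomic uint64_t *jmp_slot,
                                uint32_t first_insn, uint32_t second_insn)
    {
        /* Build the pair the same way as the aarch64 code above: the first
         * instruction occupies the low 32 bits. */
        uint64_t pair = ((uint64_t)second_insn << 32) | first_insn;
        atomic_store_explicit(jmp_slot, pair, memory_order_relaxed);
        /* The real code then flushes the icache for the patched range. */
    }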
7488 | diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc | ||
7489 | index XXXXXXX..XXXXXXX 100644 | ||
7490 | --- a/tcg/mips/tcg-target.c.inc | ||
7491 | +++ b/tcg/mips/tcg-target.c.inc | ||
7492 | @@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s) | ||
7493 | void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, | ||
7494 | uintptr_t addr) | ||
7495 | { | ||
7496 | - atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2)); | ||
7497 | + qatomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2)); | ||
7498 | flush_icache_range(jmp_addr, jmp_addr + 4); | ||
7499 | } | ||
7500 | |||
7501 | diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc | ||
7502 | index XXXXXXX..XXXXXXX 100644 | ||
7503 | --- a/tcg/ppc/tcg-target.c.inc | ||
7504 | +++ b/tcg/ppc/tcg-target.c.inc | ||
7505 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, | ||
7506 | #endif | ||
7507 | |||
7508 | /* As per the enclosing if, this is ppc64. Avoid the _Static_assert | ||
7509 | - within atomic_set that would fail to build a ppc32 host. */ | ||
7510 | - atomic_set__nocheck((uint64_t *)jmp_addr, pair); | ||
7511 | + within qatomic_set that would fail to build a ppc32 host. */ | ||
7512 | + qatomic_set__nocheck((uint64_t *)jmp_addr, pair); | ||
7513 | flush_icache_range(jmp_addr, jmp_addr + 8); | ||
7514 | } else { | ||
7515 | intptr_t diff = addr - jmp_addr; | ||
7516 | tcg_debug_assert(in_range_b(diff)); | ||
7517 | - atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc)); | ||
7518 | + qatomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc)); | ||
7519 | flush_icache_range(jmp_addr, jmp_addr + 4); | ||
7520 | } | ||
7521 | } | ||
7522 | diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc | ||
7523 | index XXXXXXX..XXXXXXX 100644 | ||
7524 | --- a/tcg/sparc/tcg-target.c.inc | ||
7525 | +++ b/tcg/sparc/tcg-target.c.inc | ||
7526 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, | ||
7527 | tcg_debug_assert(br_disp == (int32_t)br_disp); | ||
7528 | |||
7529 | if (!USE_REG_TB) { | ||
7530 | - atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2)); | ||
7531 | + qatomic_set((uint32_t *)jmp_addr, | ||
7532 | + deposit32(CALL, 0, 30, br_disp >> 2)); | ||
7533 | flush_icache_range(jmp_addr, jmp_addr + 4); | ||
7534 | return; | ||
7535 | } | ||
7536 | @@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr, | ||
7537 | | INSN_IMM13((tb_disp & 0x3ff) | -0x400)); | ||
7538 | } | ||
7539 | |||
7540 | - atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1)); | ||
7541 | + qatomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1)); | ||
7542 | flush_icache_range(jmp_addr, jmp_addr + 8); | ||
7543 | } | ||
97 | -- | 7544 | -- |
98 | 2.24.1 | 7545 | 2.26.2 |
99 | 7546 | ||
Deleted patch | |||
---|---|---|---|
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
2 | 1 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
4 | Reviewed-by: Maxim Levitsky <maximlevitsky@gmail.com> | ||
5 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
7 | Message-id: 20200120141858.587874-2-stefanha@redhat.com | ||
8 | Message-Id: <20200120141858.587874-2-stefanha@redhat.com> | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | ||
11 | configure | 27 +++++++++++++++++++++++++++ | ||
12 | 1 file changed, 27 insertions(+) | ||
13 | |||
14 | diff --git a/configure b/configure | ||
15 | index XXXXXXX..XXXXXXX 100755 | ||
16 | --- a/configure | ||
17 | +++ b/configure | ||
18 | @@ -XXX,XX +XXX,XX @@ xen="" | ||
19 | xen_ctrl_version="" | ||
20 | xen_pci_passthrough="" | ||
21 | linux_aio="" | ||
22 | +linux_io_uring="" | ||
23 | cap_ng="" | ||
24 | attr="" | ||
25 | libattr="" | ||
26 | @@ -XXX,XX +XXX,XX @@ for opt do | ||
27 | ;; | ||
28 | --enable-linux-aio) linux_aio="yes" | ||
29 | ;; | ||
30 | + --disable-linux-io-uring) linux_io_uring="no" | ||
31 | + ;; | ||
32 | + --enable-linux-io-uring) linux_io_uring="yes" | ||
33 | + ;; | ||
34 | --disable-attr) attr="no" | ||
35 | ;; | ||
36 | --enable-attr) attr="yes" | ||
37 | @@ -XXX,XX +XXX,XX @@ disabled with --disable-FEATURE, default is enabled if available: | ||
38 | vde support for vde network | ||
39 | netmap support for netmap network | ||
40 | linux-aio Linux AIO support | ||
41 | + linux-io-uring Linux io_uring support | ||
42 | cap-ng libcap-ng support | ||
43 | attr attr and xattr support | ||
44 | vhost-net vhost-net kernel acceleration support | ||
45 | @@ -XXX,XX +XXX,XX @@ EOF | ||
46 | linux_aio=no | ||
47 | fi | ||
48 | fi | ||
49 | +########################################## | ||
50 | +# linux-io-uring probe | ||
51 | + | ||
52 | +if test "$linux_io_uring" != "no" ; then | ||
53 | + if $pkg_config liburing; then | ||
54 | + linux_io_uring_cflags=$($pkg_config --cflags liburing) | ||
55 | + linux_io_uring_libs=$($pkg_config --libs liburing) | ||
56 | + linux_io_uring=yes | ||
57 | + else | ||
58 | + if test "$linux_io_uring" = "yes" ; then | ||
59 | + feature_not_found "linux io_uring" "Install liburing devel" | ||
60 | + fi | ||
61 | + linux_io_uring=no | ||
62 | + fi | ||
63 | +fi | ||
64 | |||
65 | ########################################## | ||
66 | # TPM emulation is only on POSIX | ||
67 | @@ -XXX,XX +XXX,XX @@ echo "PIE $pie" | ||
68 | echo "vde support $vde" | ||
69 | echo "netmap support $netmap" | ||
70 | echo "Linux AIO support $linux_aio" | ||
71 | +echo "Linux io_uring support $linux_io_uring" | ||
72 | echo "ATTR/XATTR support $attr" | ||
73 | echo "Install blobs $blobs" | ||
74 | echo "KVM support $kvm" | ||
75 | @@ -XXX,XX +XXX,XX @@ fi | ||
76 | if test "$linux_aio" = "yes" ; then | ||
77 | echo "CONFIG_LINUX_AIO=y" >> $config_host_mak | ||
78 | fi | ||
79 | +if test "$linux_io_uring" = "yes" ; then | ||
80 | + echo "CONFIG_LINUX_IO_URING=y" >> $config_host_mak | ||
81 | + echo "LINUX_IO_URING_CFLAGS=$linux_io_uring_cflags" >> $config_host_mak | ||
82 | + echo "LINUX_IO_URING_LIBS=$linux_io_uring_libs" >> $config_host_mak | ||
83 | +fi | ||
84 | if test "$attr" = "yes" ; then | ||
85 | echo "CONFIG_ATTR=y" >> $config_host_mak | ||
86 | fi | ||
87 | -- | ||
88 | 2.24.1 | ||
89 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
2 | 1 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Message-id: 20200120141858.587874-8-stefanha@redhat.com | ||
7 | Message-Id: <20200120141858.587874-8-stefanha@redhat.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | block.c | 22 ++++++++++++++++++++++ | ||
11 | blockdev.c | 12 ++++-------- | ||
12 | include/block/block.h | 1 + | ||
13 | 3 files changed, 27 insertions(+), 8 deletions(-) | ||
14 | |||
15 | diff --git a/block.c b/block.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/block.c | ||
18 | +++ b/block.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static BlockdevDetectZeroesOptions bdrv_parse_detect_zeroes(QemuOpts *opts, | ||
20 | return detect_zeroes; | ||
21 | } | ||
22 | |||
23 | +/** | ||
24 | + * Set open flags for aio engine | ||
25 | + * | ||
26 | + * Return 0 on success, -1 if the engine specified is invalid | ||
27 | + */ | ||
28 | +int bdrv_parse_aio(const char *mode, int *flags) | ||
29 | +{ | ||
30 | + if (!strcmp(mode, "threads")) { | ||
31 | + /* do nothing, default */ | ||
32 | + } else if (!strcmp(mode, "native")) { | ||
33 | + *flags |= BDRV_O_NATIVE_AIO; | ||
34 | +#ifdef CONFIG_LINUX_IO_URING | ||
35 | + } else if (!strcmp(mode, "io_uring")) { | ||
36 | + *flags |= BDRV_O_IO_URING; | ||
37 | +#endif | ||
38 | + } else { | ||
39 | + return -1; | ||
40 | + } | ||
41 | + | ||
42 | + return 0; | ||
43 | +} | ||
44 | + | ||
45 | /** | ||
46 | * Set open flags for a given discard mode | ||
47 | * | ||
48 | diff --git a/blockdev.c b/blockdev.c | ||
49 | index XXXXXXX..XXXXXXX 100644 | ||
50 | --- a/blockdev.c | ||
51 | +++ b/blockdev.c | ||
52 | @@ -XXX,XX +XXX,XX @@ static void extract_common_blockdev_options(QemuOpts *opts, int *bdrv_flags, | ||
53 | } | ||
54 | |||
55 | if ((aio = qemu_opt_get(opts, "aio")) != NULL) { | ||
56 | - if (!strcmp(aio, "native")) { | ||
57 | - *bdrv_flags |= BDRV_O_NATIVE_AIO; | ||
58 | - } else if (!strcmp(aio, "threads")) { | ||
59 | - /* this is the default */ | ||
60 | - } else { | ||
61 | - error_setg(errp, "invalid aio option"); | ||
62 | - return; | ||
63 | + if (bdrv_parse_aio(aio, bdrv_flags) < 0) { | ||
64 | + error_setg(errp, "invalid aio option"); | ||
65 | + return; | ||
66 | } | ||
67 | } | ||
68 | } | ||
69 | @@ -XXX,XX +XXX,XX @@ QemuOptsList qemu_common_drive_opts = { | ||
70 | },{ | ||
71 | .name = "aio", | ||
72 | .type = QEMU_OPT_STRING, | ||
73 | - .help = "host AIO implementation (threads, native)", | ||
74 | + .help = "host AIO implementation (threads, native, io_uring)", | ||
75 | },{ | ||
76 | .name = BDRV_OPT_CACHE_WB, | ||
77 | .type = QEMU_OPT_BOOL, | ||
78 | diff --git a/include/block/block.h b/include/block/block.h | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/include/block/block.h | ||
81 | +++ b/include/block/block.h | ||
82 | @@ -XXX,XX +XXX,XX @@ void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, | ||
83 | void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to, | ||
84 | Error **errp); | ||
85 | |||
86 | +int bdrv_parse_aio(const char *mode, int *flags); | ||
87 | int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough); | ||
88 | int bdrv_parse_discard_flags(const char *mode, int *flags); | ||
89 | BdrvChild *bdrv_open_child(const char *filename, | ||
90 | -- | ||
91 | 2.24.1 | ||
92 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
2 | 1 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Message-id: 20200120141858.587874-10-stefanha@redhat.com | ||
7 | Message-Id: <20200120141858.587874-10-stefanha@redhat.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | block/io_uring.c | 23 ++++++++++++++++++++--- | ||
11 | block/trace-events | 12 ++++++++++++ | ||
12 | 2 files changed, 32 insertions(+), 3 deletions(-) | ||
13 | |||
14 | diff --git a/block/io_uring.c b/block/io_uring.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block/io_uring.c | ||
17 | +++ b/block/io_uring.c | ||
18 | @@ -XXX,XX +XXX,XX @@ | ||
19 | #include "block/raw-aio.h" | ||
20 | #include "qemu/coroutine.h" | ||
21 | #include "qapi/error.h" | ||
22 | +#include "trace.h" | ||
23 | |||
24 | /* io_uring ring size */ | ||
25 | #define MAX_ENTRIES 128 | ||
26 | @@ -XXX,XX +XXX,XX @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, | ||
27 | QEMUIOVector *resubmit_qiov; | ||
28 | size_t remaining; | ||
29 | |||
30 | + trace_luring_resubmit_short_read(s, luringcb, nread); | ||
31 | + | ||
32 | /* Update read position */ | ||
33 | luringcb->total_read = nread; | ||
34 | remaining = luringcb->qiov->size - luringcb->total_read; | ||
35 | @@ -XXX,XX +XXX,XX @@ static void luring_process_completions(LuringState *s) | ||
36 | |||
37 | /* Change counters one-by-one because we can be nested. */ | ||
38 | s->io_q.in_flight--; | ||
39 | + trace_luring_process_completion(s, luringcb, ret); | ||
40 | |||
41 | /* total_read is non-zero only for resubmitted read requests */ | ||
42 | total_bytes = ret + luringcb->total_read; | ||
43 | @@ -XXX,XX +XXX,XX @@ static int ioq_submit(LuringState *s) | ||
44 | QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next); | ||
45 | } | ||
46 | ret = io_uring_submit(&s->ring); | ||
47 | + trace_luring_io_uring_submit(s, ret); | ||
48 | /* Prevent infinite loop if submission is refused */ | ||
49 | if (ret <= 0) { | ||
50 | if (ret == -EAGAIN) { | ||
51 | @@ -XXX,XX +XXX,XX @@ static void ioq_init(LuringQueue *io_q) | ||
52 | |||
53 | void luring_io_plug(BlockDriverState *bs, LuringState *s) | ||
54 | { | ||
55 | + trace_luring_io_plug(s); | ||
56 | s->io_q.plugged++; | ||
57 | } | ||
58 | |||
59 | void luring_io_unplug(BlockDriverState *bs, LuringState *s) | ||
60 | { | ||
61 | assert(s->io_q.plugged); | ||
62 | + trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged, | ||
63 | + s->io_q.in_queue, s->io_q.in_flight); | ||
64 | if (--s->io_q.plugged == 0 && | ||
65 | !s->io_q.blocked && s->io_q.in_queue > 0) { | ||
66 | ioq_submit(s); | ||
67 | @@ -XXX,XX +XXX,XX @@ void luring_io_unplug(BlockDriverState *bs, LuringState *s) | ||
68 | static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, | ||
69 | uint64_t offset, int type) | ||
70 | { | ||
71 | + int ret; | ||
72 | struct io_uring_sqe *sqes = &luringcb->sqeq; | ||
73 | |||
74 | switch (type) { | ||
75 | @@ -XXX,XX +XXX,XX @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, | ||
76 | |||
77 | QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next); | ||
78 | s->io_q.in_queue++; | ||
79 | - | ||
80 | + trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged, | ||
81 | + s->io_q.in_queue, s->io_q.in_flight); | ||
82 | if (!s->io_q.blocked && | ||
83 | (!s->io_q.plugged || | ||
84 | s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) { | ||
85 | - return ioq_submit(s); | ||
86 | + ret = ioq_submit(s); | ||
87 | + trace_luring_do_submit_done(s, ret); | ||
88 | + return ret; | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
92 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | ||
93 | .qiov = qiov, | ||
94 | .is_read = (type == QEMU_AIO_READ), | ||
95 | }; | ||
96 | - | ||
97 | + trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0, | ||
98 | + type); | ||
99 | ret = luring_do_submit(fd, &luringcb, s, offset, type); | ||
100 | + | ||
101 | if (ret < 0) { | ||
102 | return ret; | ||
103 | } | ||
104 | @@ -XXX,XX +XXX,XX @@ LuringState *luring_init(Error **errp) | ||
105 | LuringState *s = g_new0(LuringState, 1); | ||
106 | struct io_uring *ring = &s->ring; | ||
107 | |||
108 | + trace_luring_init_state(s, sizeof(*s)); | ||
109 | + | ||
110 | rc = io_uring_queue_init(MAX_ENTRIES, ring, 0); | ||
111 | if (rc < 0) { | ||
112 | error_setg_errno(errp, errno, "failed to init linux io_uring ring"); | ||
113 | @@ -XXX,XX +XXX,XX @@ void luring_cleanup(LuringState *s) | ||
114 | { | ||
115 | io_uring_queue_exit(&s->ring); | ||
116 | g_free(s); | ||
117 | + trace_luring_cleanup_state(s); | ||
118 | } | ||
119 | diff --git a/block/trace-events b/block/trace-events | ||
120 | index XXXXXXX..XXXXXXX 100644 | ||
121 | --- a/block/trace-events | ||
122 | +++ b/block/trace-events | ||
123 | @@ -XXX,XX +XXX,XX @@ qmp_block_stream(void *bs) "bs %p" | ||
124 | file_paio_submit(void *acb, void *opaque, int64_t offset, int count, int type) "acb %p opaque %p offset %"PRId64" count %d type %d" | ||
125 | file_copy_file_range(void *bs, int src, int64_t src_off, int dst, int64_t dst_off, int64_t bytes, int flags, int64_t ret) "bs %p src_fd %d offset %"PRIu64" dst_fd %d offset %"PRIu64" bytes %"PRIu64" flags %d ret %"PRId64 | ||
126 | |||
127 | +#io_uring.c | ||
128 | +luring_init_state(void *s, size_t size) "s %p size %zu" | ||
129 | +luring_cleanup_state(void *s) "%p freed" | ||
130 | +luring_io_plug(void *s) "LuringState %p plug" | ||
131 | +luring_io_unplug(void *s, int blocked, int plugged, int queued, int inflight) "LuringState %p blocked %d plugged %d queued %d inflight %d" | ||
132 | +luring_do_submit(void *s, int blocked, int plugged, int queued, int inflight) "LuringState %p blocked %d plugged %d queued %d inflight %d" | ||
133 | +luring_do_submit_done(void *s, int ret) "LuringState %p submitted to kernel %d" | ||
134 | +luring_co_submit(void *bs, void *s, void *luringcb, int fd, uint64_t offset, size_t nbytes, int type) "bs %p s %p luringcb %p fd %d offset %" PRId64 " nbytes %zd type %d" | ||
135 | +luring_process_completion(void *s, void *aiocb, int ret) "LuringState %p luringcb %p ret %d" | ||
136 | +luring_io_uring_submit(void *s, int ret) "LuringState %p ret %d" | ||
137 | +luring_resubmit_short_read(void *s, void *luringcb, int nread) "LuringState %p luringcb %p nread %d" | ||
138 | + | ||
139 | # qcow2.c | ||
140 | qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t file_cluster_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu" | ||
141 | qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d" | ||
142 | -- | ||
143 | 2.24.1 | ||
144 | |||
Deleted patch | |||
---|---|---|---|
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
2 | 1 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Message-id: 20200120141858.587874-11-stefanha@redhat.com | ||
7 | Message-Id: <20200120141858.587874-11-stefanha@redhat.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | block/io_uring.c | 17 ++++++++++++++++- | ||
11 | 1 file changed, 16 insertions(+), 1 deletion(-) | ||
12 | |||
13 | diff --git a/block/io_uring.c b/block/io_uring.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/block/io_uring.c | ||
16 | +++ b/block/io_uring.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void qemu_luring_completion_cb(void *opaque) | ||
18 | luring_process_completions_and_submit(s); | ||
19 | } | ||
20 | |||
21 | +static bool qemu_luring_poll_cb(void *opaque) | ||
22 | +{ | ||
23 | + LuringState *s = opaque; | ||
24 | + struct io_uring_cqe *cqes; | ||
25 | + | ||
26 | + if (io_uring_peek_cqe(&s->ring, &cqes) == 0) { | ||
27 | + if (cqes) { | ||
28 | + luring_process_completions_and_submit(s); | ||
29 | + return true; | ||
30 | + } | ||
31 | + } | ||
32 | + | ||
33 | + return false; | ||
34 | +} | ||
35 | + | ||
36 | static void ioq_init(LuringQueue *io_q) | ||
37 | { | ||
38 | QSIMPLEQ_INIT(&io_q->submit_queue); | ||
39 | @@ -XXX,XX +XXX,XX @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context) | ||
40 | s->aio_context = new_context; | ||
41 | s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); | ||
42 | aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, | ||
43 | - qemu_luring_completion_cb, NULL, NULL, s); | ||
44 | + qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s); | ||
45 | } | ||
46 | |||
47 | LuringState *luring_init(Error **errp) | ||
48 | -- | ||
49 | 2.24.1 | ||
50 | |||
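The polling callback added above relies on liburing's io_uring_peek_cqe(), which inspects the completion ring from userspace without a system call. A hedged standalone sketch of that check; the helper name is invented:

    #include <liburing.h>
    #include <stdbool.h>

    static bool ring_has_completions(struct io_uring *ring)
    {
        struct io_uring_cqe *cqe;

        /* Returns 0 and fills in cqe when a completion is already pending;
         * nothing is consumed until io_uring_cqe_seen() is called later. */
        return io_uring_peek_cqe(ring, &cqe) == 0 && cqe != NULL;
    }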
Deleted patch | |||
---|---|---|---|
1 | From: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
2 | 1 | ||
3 | Signed-off-by: Aarushi Mehta <mehta.aaru20@gmail.com> | ||
4 | Acked-by: Stefano Garzarella <sgarzare@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Message-id: 20200120141858.587874-12-stefanha@redhat.com | ||
7 | Message-Id: <20200120141858.587874-12-stefanha@redhat.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | qemu-io.c | 25 +++++++++++++++++++++---- | ||
11 | 1 file changed, 21 insertions(+), 4 deletions(-) | ||
12 | |||
13 | diff --git a/qemu-io.c b/qemu-io.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/qemu-io.c | ||
16 | +++ b/qemu-io.c | ||
17 | @@ -XXX,XX +XXX,XX @@ static void open_help(void) | ||
18 | " -C, -- use copy-on-read\n" | ||
19 | " -n, -- disable host cache, short for -t none\n" | ||
20 | " -U, -- force shared permissions\n" | ||
21 | -" -k, -- use kernel AIO implementation (on Linux only)\n" | ||
22 | +" -k, -- use kernel AIO implementation (Linux only, prefer use of -i)\n" | ||
23 | +" -i, -- use AIO mode (threads, native or io_uring)\n" | ||
24 | " -t, -- use the given cache mode for the image\n" | ||
25 | " -d, -- use the given discard mode for the image\n" | ||
26 | " -o, -- options to be given to the block driver" | ||
27 | @@ -XXX,XX +XXX,XX @@ static int open_f(BlockBackend *blk, int argc, char **argv) | ||
28 | QDict *opts; | ||
29 | bool force_share = false; | ||
30 | |||
31 | - while ((c = getopt(argc, argv, "snCro:kt:d:U")) != -1) { | ||
32 | + while ((c = getopt(argc, argv, "snCro:ki:t:d:U")) != -1) { | ||
33 | switch (c) { | ||
34 | case 's': | ||
35 | flags |= BDRV_O_SNAPSHOT; | ||
36 | @@ -XXX,XX +XXX,XX @@ static int open_f(BlockBackend *blk, int argc, char **argv) | ||
37 | return -EINVAL; | ||
38 | } | ||
39 | break; | ||
40 | + case 'i': | ||
41 | + if (bdrv_parse_aio(optarg, &flags) < 0) { | ||
42 | + error_report("Invalid aio option: %s", optarg); | ||
43 | + qemu_opts_reset(&empty_opts); | ||
44 | + return -EINVAL; | ||
45 | + } | ||
46 | + break; | ||
47 | case 'o': | ||
48 | if (imageOpts) { | ||
49 | printf("--image-opts and 'open -o' are mutually exclusive\n"); | ||
50 | @@ -XXX,XX +XXX,XX @@ static void usage(const char *name) | ||
51 | " -n, --nocache disable host cache, short for -t none\n" | ||
52 | " -C, --copy-on-read enable copy-on-read\n" | ||
53 | " -m, --misalign misalign allocations for O_DIRECT\n" | ||
54 | -" -k, --native-aio use kernel AIO implementation (on Linux only)\n" | ||
55 | +" -k, --native-aio use kernel AIO implementation\n" | ||
56 | +" (Linux only, prefer use of -i)\n" | ||
57 | +" -i, --aio=MODE use AIO mode (threads, native or io_uring)\n" | ||
58 | " -t, --cache=MODE use the given cache mode for the image\n" | ||
59 | " -d, --discard=MODE use the given discard mode for the image\n" | ||
60 | " -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n" | ||
61 | @@ -XXX,XX +XXX,XX @@ static QemuOptsList file_opts = { | ||
62 | int main(int argc, char **argv) | ||
63 | { | ||
64 | int readonly = 0; | ||
65 | - const char *sopt = "hVc:d:f:rsnCmkt:T:U"; | ||
66 | + const char *sopt = "hVc:d:f:rsnCmki:t:T:U"; | ||
67 | const struct option lopt[] = { | ||
68 | { "help", no_argument, NULL, 'h' }, | ||
69 | { "version", no_argument, NULL, 'V' }, | ||
70 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
71 | { "copy-on-read", no_argument, NULL, 'C' }, | ||
72 | { "misalign", no_argument, NULL, 'm' }, | ||
73 | { "native-aio", no_argument, NULL, 'k' }, | ||
74 | + { "aio", required_argument, NULL, 'i' }, | ||
75 | { "discard", required_argument, NULL, 'd' }, | ||
76 | { "cache", required_argument, NULL, 't' }, | ||
77 | { "trace", required_argument, NULL, 'T' }, | ||
78 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
79 | case 'k': | ||
80 | flags |= BDRV_O_NATIVE_AIO; | ||
81 | break; | ||
82 | + case 'i': | ||
83 | + if (bdrv_parse_aio(optarg, &flags) < 0) { | ||
84 | + error_report("Invalid aio option: %s", optarg); | ||
85 | + exit(1); | ||
86 | + } | ||
87 | + break; | ||
88 | case 't': | ||
89 | if (bdrv_parse_cache_mode(optarg, &flags, &writethrough) < 0) { | ||
90 | error_report("Invalid cache option: %s", optarg); | ||
91 | -- | ||
92 | 2.24.1 | ||
93 | |||