1
The following changes since commit 27c94566379069fb8930bb1433dcffbf7df3203d:
1
The following changes since commit 66234fee9c2d37bfbc523aa8d0ae5300a14cc10e:
2
2
3
Merge remote-tracking branch 'remotes/edgar/tags/edgar/xilinx-next-2020-04-30.for-upstream' into staging (2020-04-30 16:47:23 +0100)
3
Merge remote-tracking branch 'remotes/alistair/tags/pull-riscv-to-apply-20200603' into staging (2020-06-04 11:38:48 +0100)
4
4
5
5
are available in the Git repository at:
6
are available in the Git repository at:
6
7
7
https://github.com/stefanha/qemu.git tags/block-pull-request
8
https://github.com/stefanha/qemu.git tags/block-pull-request
8
9
9
for you to fetch changes up to cc1adc4488059ac16d4d2772a7aa7cd1323deeca:
10
for you to fetch changes up to 7d2410cea154bf915fb30179ebda3b17ac36e70e:
10
11
11
lockable: Replace locks with lock guard macros (2020-05-01 09:19:25 +0100)
12
block: Factor out bdrv_run_co() (2020-06-05 09:54:48 +0100)
12
13
13
----------------------------------------------------------------
14
----------------------------------------------------------------
14
Pull request
15
Pull request
15
16
16
Fix the QEMU_LOCK_GUARD() macros, use them more widely, and allow the fuzzer
17
target to be selected from argv[0].
18
19
----------------------------------------------------------------
17
----------------------------------------------------------------
20
18
21
Alexander Bulekov (1):
19
Alexander Bulekov (4):
22
fuzz: select fuzz target using executable name
20
fuzz: add datadir for oss-fuzz compatability
21
fuzz: fix typo in i440fx-qtest-reboot arguments
22
fuzz: add mangled object name to linker script
23
fuzz: run the main-loop in fork-server process
23
24
24
Daniel Brodsky (2):
25
Philippe Mathieu-Daudé (4):
25
lockable: fix __COUNTER__ macro to be referenced properly
26
memory: Rename memory_region_do_writeback -> memory_region_writeback
26
lockable: replaced locks with lock guard macros where appropriate
27
memory: Extract memory_region_msync() from memory_region_writeback()
28
hw/block: Let the NVMe emulated device be target-agnostic
29
exec: Rename qemu_ram_writeback() as qemu_ram_msync()
27
30
28
Simran Singhal (1):
31
Stefano Garzarella (2):
29
lockable: Replace locks with lock guard macros
32
io_uring: retry io_uring_submit() if it fails with errno=EINTR
33
io_uring: use io_uring_cq_ready() to check for ready cqes
30
34
31
include/qemu/lockable.h | 7 +++---
35
Vladimir Sementsov-Ogievskiy (1):
32
include/qemu/rcu.h | 2 +-
36
block: Factor out bdrv_run_co()
33
block/iscsi.c | 7 ++----
34
block/nfs.c | 51 +++++++++++++++++++----------------------
35
cpus-common.c | 14 ++++-------
36
hw/display/qxl.c | 43 ++++++++++++++++------------------
37
hw/hyperv/hyperv.c | 15 ++++++------
38
hw/rdma/rdma_backend.c | 50 ++++++++++++++++++++--------------------
39
hw/rdma/rdma_rm.c | 3 +--
40
hw/vfio/platform.c | 5 ++--
41
migration/migration.c | 3 +--
42
migration/multifd.c | 8 +++----
43
migration/ram.c | 3 +--
44
monitor/misc.c | 4 +---
45
tests/qtest/fuzz/fuzz.c | 19 ++++++++-------
46
ui/spice-display.c | 14 +++++------
47
util/log.c | 4 ++--
48
util/qemu-timer.c | 17 +++++++-------
49
util/rcu.c | 8 +++----
50
util/thread-pool.c | 3 +--
51
util/vfio-helpers.c | 5 ++--
52
slirp | 2 +-
53
22 files changed, 133 insertions(+), 154 deletions(-)
54
37
55
--
38
hw/block/Makefile.objs | 2 +-
56
2.25.3
39
include/exec/memory.h | 15 ++-
40
include/exec/ram_addr.h | 4 +-
41
include/sysemu/sysemu.h | 2 +
42
block/io.c | 193 +++++++++++-----------------
43
block/io_uring.c | 11 +-
44
exec.c | 2 +-
45
hw/block/nvme.c | 6 +-
46
memory.c | 12 +-
47
softmmu/vl.c | 2 +-
48
target/arm/helper.c | 2 +-
49
tests/qtest/fuzz/fuzz.c | 15 +++
50
tests/qtest/fuzz/i440fx_fuzz.c | 3 +-
51
tests/qtest/fuzz/virtio_net_fuzz.c | 2 +
52
tests/qtest/fuzz/virtio_scsi_fuzz.c | 2 +
53
tests/qtest/fuzz/fork_fuzz.ld | 5 +
54
16 files changed, 134 insertions(+), 144 deletions(-)
57
55
56
--
57
2.25.4
58
diff view generated by jsdifflib
New patch
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
2
3
As recently documented [1], io_uring_enter(2) syscall can return an
4
error (errno=EINTR) if the operation was interrupted by a delivery
5
of a signal before it could complete.
6
7
This should happen when IORING_ENTER_GETEVENTS flag is used, for
8
example during io_uring_submit_and_wait() or during io_uring_submit()
9
when IORING_SETUP_IOPOLL is enabled.
10
11
We shouldn't have this problem for now, but it's better to prevent it.
12
13
[1] https://github.com/axboe/liburing/commit/344355ec6619de8f4e64584c9736530b5346e4f4
14
15
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
16
Message-id: 20200519133041.112138-1-sgarzare@redhat.com
17
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
18
---
19
block/io_uring.c | 2 +-
20
1 file changed, 1 insertion(+), 1 deletion(-)
21
22
diff --git a/block/io_uring.c b/block/io_uring.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block/io_uring.c
25
+++ b/block/io_uring.c
26
@@ -XXX,XX +XXX,XX @@ static int ioq_submit(LuringState *s)
27
trace_luring_io_uring_submit(s, ret);
28
/* Prevent infinite loop if submission is refused */
29
if (ret <= 0) {
30
- if (ret == -EAGAIN) {
31
+ if (ret == -EAGAIN || ret == -EINTR) {
32
continue;
33
}
34
break;
35
--
36
2.25.4
37
diff view generated by jsdifflib
New patch
1
From: Stefano Garzarella <sgarzare@redhat.com>
1
2
3
In qemu_luring_poll_cb() we are not using the cqe peeked from the
4
CQ ring. We are using io_uring_peek_cqe() only to see if there
5
are cqes ready, so we can replace it with io_uring_cq_ready().
6
7
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
8
Message-id: 20200519134942.118178-1-sgarzare@redhat.com
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
11
block/io_uring.c | 9 +++------
12
1 file changed, 3 insertions(+), 6 deletions(-)
13
14
diff --git a/block/io_uring.c b/block/io_uring.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/io_uring.c
17
+++ b/block/io_uring.c
18
@@ -XXX,XX +XXX,XX @@ static void qemu_luring_completion_cb(void *opaque)
19
static bool qemu_luring_poll_cb(void *opaque)
20
{
21
LuringState *s = opaque;
22
- struct io_uring_cqe *cqes;
23
24
- if (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
25
- if (cqes) {
26
- luring_process_completions_and_submit(s);
27
- return true;
28
- }
29
+ if (io_uring_cq_ready(&s->ring)) {
30
+ luring_process_completions_and_submit(s);
31
+ return true;
32
}
33
34
return false;
35
--
36
2.25.4
37
diff view generated by jsdifflib
1
From: Alexander Bulekov <alxndr@bu.edu>
1
From: Alexander Bulekov <alxndr@bu.edu>
2
2
3
The fuzzers are built into a binary (e.g. qemu-fuzz-i386). To select the
3
This allows us to keep pc-bios in executable_dir/pc-bios, rather than
4
device to fuzz/fuzz target, we usually use the --fuzz-target= argument.
4
executable_dir/../pc-bios, which is incompatible with oss-fuzz' file
5
This commit allows the fuzz-target to be specified using the name of the
5
structure.
6
executable. If the executable name ends with -target-FUZZ_TARGET, then
7
we select the fuzz target based on this name, rather than the
8
--fuzz-target argument. This is useful for systems such as oss-fuzz
9
where we don't have control of the arguments passed to the fuzzer.
10
11
[Fixed incorrect indentation.
12
--Stefan]
13
6
14
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
7
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
15
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
8
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
16
Message-id: 20200421182230.6313-1-alxndr@bu.edu
9
Message-id: 20200512030133.29896-2-alxndr@bu.edu
17
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
18
---
11
---
19
tests/qtest/fuzz/fuzz.c | 19 +++++++++++--------
12
include/sysemu/sysemu.h | 2 ++
20
slirp | 2 +-
13
softmmu/vl.c | 2 +-
21
2 files changed, 12 insertions(+), 9 deletions(-)
14
tests/qtest/fuzz/fuzz.c | 15 +++++++++++++++
15
3 files changed, 18 insertions(+), 1 deletion(-)
22
16
17
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/sysemu/sysemu.h
20
+++ b/include/sysemu/sysemu.h
21
@@ -XXX,XX +XXX,XX @@ extern const char *qemu_name;
22
extern QemuUUID qemu_uuid;
23
extern bool qemu_uuid_set;
24
25
+void qemu_add_data_dir(const char *path);
26
+
27
void qemu_add_exit_notifier(Notifier *notify);
28
void qemu_remove_exit_notifier(Notifier *notify);
29
30
diff --git a/softmmu/vl.c b/softmmu/vl.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/softmmu/vl.c
33
+++ b/softmmu/vl.c
34
@@ -XXX,XX +XXX,XX @@ char *qemu_find_file(int type, const char *name)
35
return NULL;
36
}
37
38
-static void qemu_add_data_dir(const char *path)
39
+void qemu_add_data_dir(const char *path)
40
{
41
int i;
42
23
diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
43
diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
24
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
25
--- a/tests/qtest/fuzz/fuzz.c
45
--- a/tests/qtest/fuzz/fuzz.c
26
+++ b/tests/qtest/fuzz/fuzz.c
46
+++ b/tests/qtest/fuzz/fuzz.c
27
@@ -XXX,XX +XXX,XX @@ static void usage(char *path)
28
printf(" * %s : %s\n", tmp->target->name,
29
tmp->target->description);
30
}
31
+ printf("Alternatively, add -target-FUZZ_TARGET to the executable name\n");
32
exit(0);
33
}
34
35
@@ -XXX,XX +XXX,XX @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
47
@@ -XXX,XX +XXX,XX @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
36
module_call_init(MODULE_INIT_QOM);
48
{
37
module_call_init(MODULE_INIT_LIBQOS);
49
38
50
char *target_name;
39
- if (*argc <= 1) {
51
+ char *dir;
40
+ target_name = strstr(**argv, "-target-");
52
41
+ if (target_name) { /* The binary name specifies the target */
53
/* Initialize qgraph and modules */
42
+ target_name += strlen("-target-");
54
qos_graph_init();
43
+ } else if (*argc > 1) { /* The target is specified as an argument */
55
@@ -XXX,XX +XXX,XX @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
44
+ target_name = (*argv)[1];
56
target_name = strstr(**argv, "-target-");
45
+ if (!strstr(target_name, "--fuzz-target=")) {
57
if (target_name) { /* The binary name specifies the target */
46
+ usage(**argv);
58
target_name += strlen("-target-");
59
+ /*
60
+ * With oss-fuzz, the executable is kept in the root of a directory (we
61
+ * cannot assume the path). All data (including bios binaries) must be
62
+ * in the same dir, or a subdir. Thus, we cannot place the pc-bios so
63
+ * that it would be in exec_dir/../pc-bios.
64
+ * As a workaround, oss-fuzz allows us to use argv[0] to get the
65
+ * location of the executable. Using this we add exec_dir/pc-bios to
66
+ * the datadirs.
67
+ */
68
+ dir = g_build_filename(g_path_get_dirname(**argv), "pc-bios", NULL);
69
+ if (g_file_test(dir, G_FILE_TEST_IS_DIR)) {
70
+ qemu_add_data_dir(dir);
47
+ }
71
+ }
48
+ target_name += strlen("--fuzz-target=");
72
+ g_free(dir);
49
+ } else {
73
} else if (*argc > 1) { /* The target is specified as an argument */
50
usage(**argv);
74
target_name = (*argv)[1];
51
}
75
if (!strstr(target_name, "--fuzz-target=")) {
52
53
/* Identify the fuzz target */
54
- target_name = (*argv)[1];
55
- if (!strstr(target_name, "--fuzz-target=")) {
56
- usage(**argv);
57
- }
58
-
59
- target_name += strlen("--fuzz-target=");
60
-
61
fuzz_target = fuzz_get_target(target_name);
62
if (!fuzz_target) {
63
usage(**argv);
64
diff --git a/slirp b/slirp
65
index XXXXXXX..XXXXXXX 160000
66
--- a/slirp
67
+++ b/slirp
68
@@ -1 +1 @@
69
-Subproject commit 2faae0f778f818fadc873308f983289df697eb93
70
+Subproject commit 55ab21c9a36852915b81f1b41ebaf3b6509dd8ba
71
--
76
--
72
2.25.3
77
2.25.4
73
78
diff view generated by jsdifflib
New patch
1
From: Alexander Bulekov <alxndr@bu.edu>
1
2
3
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
4
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Message-id: 20200512030133.29896-3-alxndr@bu.edu
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
---
9
tests/qtest/fuzz/i440fx_fuzz.c | 2 +-
10
1 file changed, 1 insertion(+), 1 deletion(-)
11
12
diff --git a/tests/qtest/fuzz/i440fx_fuzz.c b/tests/qtest/fuzz/i440fx_fuzz.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tests/qtest/fuzz/i440fx_fuzz.c
15
+++ b/tests/qtest/fuzz/i440fx_fuzz.c
16
@@ -XXX,XX +XXX,XX @@ static void i440fx_fuzz_qos_fork(QTestState *s,
17
}
18
19
static const char *i440fx_qtest_argv = TARGET_NAME " -machine accel=qtest"
20
- "-m 0 -display none";
21
+ " -m 0 -display none";
22
static const char *i440fx_argv(FuzzTarget *t)
23
{
24
return i440fx_qtest_argv;
25
--
26
2.25.4
27
diff view generated by jsdifflib
New patch
1
From: Alexander Bulekov <alxndr@bu.edu>
1
2
3
Previously, we relied on "FuzzerTracePC*(.bss*)" to place libfuzzer's
4
fuzzer::TPC object into our contiguous shared-memory region. This does
5
not work for some libfuzzer builds, so this addition identifies the
6
region by its mangled name: *(.bss._ZN6fuzzer3TPCE);
7
8
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
9
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
10
Message-id: 20200512030133.29896-4-alxndr@bu.edu
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
tests/qtest/fuzz/fork_fuzz.ld | 5 +++++
14
1 file changed, 5 insertions(+)
15
16
diff --git a/tests/qtest/fuzz/fork_fuzz.ld b/tests/qtest/fuzz/fork_fuzz.ld
17
index XXXXXXX..XXXXXXX 100644
18
--- a/tests/qtest/fuzz/fork_fuzz.ld
19
+++ b/tests/qtest/fuzz/fork_fuzz.ld
20
@@ -XXX,XX +XXX,XX @@ SECTIONS
21
22
/* Internal Libfuzzer TracePC object which contains the ValueProfileMap */
23
FuzzerTracePC*(.bss*);
24
+ /*
25
+ * In case the above line fails, explicitly specify the (mangled) name of
26
+ * the object we care about
27
+ */
28
+ *(.bss._ZN6fuzzer3TPCE);
29
}
30
.data.fuzz_end : ALIGN(4K)
31
{
32
--
33
2.25.4
34
diff view generated by jsdifflib
New patch
1
From: Alexander Bulekov <alxndr@bu.edu>
1
2
3
Without this, the time since the last main-loop keeps increasing, as the
4
fuzzer runs. The forked children need to handle all the "past-due"
5
timers, slowing them down, over time. With this change, the
6
parent/fork-server process runs the main-loop, while waiting on the
7
child, ensuring that the timer events do not pile up, over time.
8
9
Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
10
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
11
Message-id: 20200512030133.29896-5-alxndr@bu.edu
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
14
tests/qtest/fuzz/i440fx_fuzz.c | 1 +
15
tests/qtest/fuzz/virtio_net_fuzz.c | 2 ++
16
tests/qtest/fuzz/virtio_scsi_fuzz.c | 2 ++
17
3 files changed, 5 insertions(+)
18
19
diff --git a/tests/qtest/fuzz/i440fx_fuzz.c b/tests/qtest/fuzz/i440fx_fuzz.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tests/qtest/fuzz/i440fx_fuzz.c
22
+++ b/tests/qtest/fuzz/i440fx_fuzz.c
23
@@ -XXX,XX +XXX,XX @@ static void i440fx_fuzz_qos_fork(QTestState *s,
24
i440fx_fuzz_qos(s, Data, Size);
25
_Exit(0);
26
} else {
27
+ flush_events(s);
28
wait(NULL);
29
}
30
}
31
diff --git a/tests/qtest/fuzz/virtio_net_fuzz.c b/tests/qtest/fuzz/virtio_net_fuzz.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tests/qtest/fuzz/virtio_net_fuzz.c
34
+++ b/tests/qtest/fuzz/virtio_net_fuzz.c
35
@@ -XXX,XX +XXX,XX @@ static void virtio_net_fork_fuzz(QTestState *s,
36
flush_events(s);
37
_Exit(0);
38
} else {
39
+ flush_events(s);
40
wait(NULL);
41
}
42
}
43
@@ -XXX,XX +XXX,XX @@ static void virtio_net_fork_fuzz_check_used(QTestState *s,
44
flush_events(s);
45
_Exit(0);
46
} else {
47
+ flush_events(s);
48
wait(NULL);
49
}
50
}
51
diff --git a/tests/qtest/fuzz/virtio_scsi_fuzz.c b/tests/qtest/fuzz/virtio_scsi_fuzz.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tests/qtest/fuzz/virtio_scsi_fuzz.c
54
+++ b/tests/qtest/fuzz/virtio_scsi_fuzz.c
55
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_fork_fuzz(QTestState *s,
56
flush_events(s);
57
_Exit(0);
58
} else {
59
+ flush_events(s);
60
wait(NULL);
61
}
62
}
63
@@ -XXX,XX +XXX,XX @@ static void virtio_scsi_with_flag_fuzz(QTestState *s,
64
}
65
_Exit(0);
66
} else {
67
+ flush_events(s);
68
wait(NULL);
69
}
70
}
71
--
72
2.25.4
73
diff view generated by jsdifflib
1
From: Daniel Brodsky <dnbrdsky@gmail.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
- __COUNTER__ doesn't work with ## concat
3
We usually use '_do_' for internal functions. Rename
4
- replaced ## with glue() macro so __COUNTER__ is evaluated
4
memory_region_do_writeback() as memory_region_writeback().
5
5
6
Fixes: 3284c3ddc4
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-id: 20200404042108.389635-2-dnbrdsky@gmail.com
9
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
10
Message-id: 20200508062456.23344-2-philmd@redhat.com
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
12
---
12
include/qemu/lockable.h | 7 ++++---
13
include/exec/memory.h | 4 ++--
13
include/qemu/rcu.h | 2 +-
14
memory.c | 2 +-
14
2 files changed, 5 insertions(+), 4 deletions(-)
15
target/arm/helper.c | 2 +-
16
3 files changed, 4 insertions(+), 4 deletions(-)
15
17
16
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
18
diff --git a/include/exec/memory.h b/include/exec/memory.h
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/include/qemu/lockable.h
20
--- a/include/exec/memory.h
19
+++ b/include/qemu/lockable.h
21
+++ b/include/exec/memory.h
20
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
22
@@ -XXX,XX +XXX,XX @@ void *memory_region_get_ram_ptr(MemoryRegion *mr);
21
* }
23
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
24
Error **errp);
25
/**
26
- * memory_region_do_writeback: Trigger cache writeback or msync for
27
+ * memory_region_writeback: Trigger cache writeback or msync for
28
* selected address range
29
*
30
* @mr: the memory region to be updated
31
* @addr: the initial address of the range to be written back
32
* @size: the size of the range to be written back
22
*/
33
*/
23
#define WITH_QEMU_LOCK_GUARD(x) \
34
-void memory_region_do_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
24
- WITH_QEMU_LOCK_GUARD_((x), qemu_lockable_auto##__COUNTER__)
35
+void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
25
+ WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))
26
36
27
/**
37
/**
28
* QEMU_LOCK_GUARD - Lock an object until the end of the scope
38
* memory_region_set_log: Turn dirty logging on or off for a region.
29
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
39
diff --git a/memory.c b/memory.c
30
* return; <-- mutex is automatically unlocked
31
* }
32
*/
33
-#define QEMU_LOCK_GUARD(x) \
34
- g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
35
+#define QEMU_LOCK_GUARD(x) \
36
+ g_autoptr(QemuLockable) \
37
+ glue(qemu_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
38
qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))
39
40
#endif
41
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
42
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
43
--- a/include/qemu/rcu.h
41
--- a/memory.c
44
+++ b/include/qemu/rcu.h
42
+++ b/memory.c
45
@@ -XXX,XX +XXX,XX @@ static inline void rcu_read_auto_unlock(RCUReadAuto *r)
43
@@ -XXX,XX +XXX,XX @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp
46
G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
44
}
47
45
48
#define WITH_RCU_READ_LOCK_GUARD() \
46
49
- WITH_RCU_READ_LOCK_GUARD_(_rcu_read_auto##__COUNTER__)
47
-void memory_region_do_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
50
+ WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))
48
+void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
51
49
{
52
#define WITH_RCU_READ_LOCK_GUARD_(var) \
50
/*
53
for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
51
* Might be extended case needed to cover
52
diff --git a/target/arm/helper.c b/target/arm/helper.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/arm/helper.c
55
+++ b/target/arm/helper.c
56
@@ -XXX,XX +XXX,XX @@ static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
57
mr = memory_region_from_host(haddr, &offset);
58
59
if (mr) {
60
- memory_region_do_writeback(mr, offset, dline_size);
61
+ memory_region_writeback(mr, offset, dline_size);
62
}
63
}
64
}
54
--
65
--
55
2.25.3
66
2.25.4
56
67
diff view generated by jsdifflib
New patch
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
2
3
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
4
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
8
Message-id: 20200508062456.23344-3-philmd@redhat.com
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
11
include/exec/memory.h | 13 ++++++++++++-
12
memory.c | 10 ++++++++--
13
2 files changed, 20 insertions(+), 3 deletions(-)
14
15
diff --git a/include/exec/memory.h b/include/exec/memory.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/exec/memory.h
18
+++ b/include/exec/memory.h
19
@@ -XXX,XX +XXX,XX @@ void *memory_region_get_ram_ptr(MemoryRegion *mr);
20
*/
21
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
22
Error **errp);
23
+
24
/**
25
- * memory_region_writeback: Trigger cache writeback or msync for
26
+ * memory_region_msync: Synchronize selected address range of
27
+ * a memory mapped region
28
+ *
29
+ * @mr: the memory region to be msync
30
+ * @addr: the initial address of the range to be sync
31
+ * @size: the size of the range to be sync
32
+ */
33
+void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
34
+
35
+/**
36
+ * memory_region_writeback: Trigger cache writeback for
37
* selected address range
38
*
39
* @mr: the memory region to be updated
40
diff --git a/memory.c b/memory.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/memory.c
43
+++ b/memory.c
44
@@ -XXX,XX +XXX,XX @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp
45
qemu_ram_resize(mr->ram_block, newsize, errp);
46
}
47
48
+void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
49
+{
50
+ if (mr->ram_block) {
51
+ qemu_ram_writeback(mr->ram_block, addr, size);
52
+ }
53
+}
54
55
void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
56
{
57
@@ -XXX,XX +XXX,XX @@ void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
58
* Might be extended case needed to cover
59
* different types of memory regions
60
*/
61
- if (mr->ram_block && mr->dirty_log_mask) {
62
- qemu_ram_writeback(mr->ram_block, addr, size);
63
+ if (mr->dirty_log_mask) {
64
+ memory_region_msync(mr, addr, size);
65
}
66
}
67
68
--
69
2.25.4
70
diff view generated by jsdifflib
New patch
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
2
3
Now than the non-target specific memory_region_msync() function
4
is available, use it to make this device target-agnostic.
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
10
Message-id: 20200508062456.23344-4-philmd@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
hw/block/Makefile.objs | 2 +-
14
hw/block/nvme.c | 6 ++----
15
2 files changed, 3 insertions(+), 5 deletions(-)
16
17
diff --git a/hw/block/Makefile.objs b/hw/block/Makefile.objs
18
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/block/Makefile.objs
20
+++ b/hw/block/Makefile.objs
21
@@ -XXX,XX +XXX,XX @@ common-obj-$(CONFIG_SH4) += tc58128.o
22
23
obj-$(CONFIG_VIRTIO_BLK) += virtio-blk.o
24
obj-$(CONFIG_VHOST_USER_BLK) += vhost-user-blk.o
25
-obj-$(CONFIG_NVME_PCI) += nvme.o
26
+common-obj-$(CONFIG_NVME_PCI) += nvme.o
27
28
obj-y += dataplane/
29
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/block/nvme.c
32
+++ b/hw/block/nvme.c
33
@@ -XXX,XX +XXX,XX @@
34
#include "qapi/visitor.h"
35
#include "sysemu/hostmem.h"
36
#include "sysemu/block-backend.h"
37
-#include "exec/ram_addr.h"
38
-
39
+#include "exec/memory.h"
40
#include "qemu/log.h"
41
#include "qemu/module.h"
42
#include "qemu/cutils.h"
43
@@ -XXX,XX +XXX,XX @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
44
*/
45
if (addr == 0xE08 &&
46
(NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
47
- qemu_ram_writeback(n->pmrdev->mr.ram_block,
48
- 0, n->pmrdev->size);
49
+ memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
50
}
51
memcpy(&val, ptr + addr, size);
52
} else {
53
--
54
2.25.4
55
diff view generated by jsdifflib
1
From: Daniel Brodsky <dnbrdsky@gmail.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
- ran regexp "qemu_mutex_lock\(.*\).*\n.*if" to find targets
3
Rename qemu_ram_writeback() as qemu_ram_msync() to better
4
- replaced result with QEMU_LOCK_GUARD if all unlocks at function end
4
match what it does.
5
- replaced result with WITH_QEMU_LOCK_GUARD if unlock not at end
6
5
7
Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
6
Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Juan Quintela <quintela@redhat.com>
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20200404042108.389635-3-dnbrdsky@gmail.com
8
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
11
Message-id: 20200508062456.23344-5-philmd@redhat.com
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
13
---
12
block/iscsi.c | 7 ++----
14
include/exec/ram_addr.h | 4 ++--
13
block/nfs.c | 51 ++++++++++++++++++++-----------------------
15
exec.c | 2 +-
14
cpus-common.c | 14 +++++-------
16
memory.c | 2 +-
15
hw/display/qxl.c | 43 +++++++++++++++++-------------------
17
3 files changed, 4 insertions(+), 4 deletions(-)
16
hw/vfio/platform.c | 5 ++---
17
migration/migration.c | 3 +--
18
migration/multifd.c | 8 +++----
19
migration/ram.c | 3 +--
20
monitor/misc.c | 4 +---
21
ui/spice-display.c | 14 ++++++------
22
util/log.c | 4 ++--
23
util/qemu-timer.c | 17 +++++++--------
24
util/rcu.c | 8 +++----
25
util/thread-pool.c | 3 +--
26
util/vfio-helpers.c | 5 ++---
27
15 files changed, 83 insertions(+), 106 deletions(-)
28
18
29
diff --git a/block/iscsi.c b/block/iscsi.c
19
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
30
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
31
--- a/block/iscsi.c
21
--- a/include/exec/ram_addr.h
32
+++ b/block/iscsi.c
22
+++ b/include/exec/ram_addr.h
33
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
23
@@ -XXX,XX +XXX,XX @@ void qemu_ram_free(RAMBlock *block);
24
25
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
26
27
-void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length);
28
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
29
30
/* Clear whole block of mem */
31
static inline void qemu_ram_block_writeback(RAMBlock *block)
34
{
32
{
35
IscsiLun *iscsilun = opaque;
33
- qemu_ram_writeback(block, 0, block->used_length);
36
34
+ qemu_ram_msync(block, 0, block->used_length);
37
- qemu_mutex_lock(&iscsilun->mutex);
38
+ QEMU_LOCK_GUARD(&iscsilun->mutex);
39
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
40
error_report("iSCSI: NOP timeout. Reconnecting...");
41
iscsilun->request_timed_out = true;
42
} else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
43
error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
44
- goto out;
45
+ return;
46
}
47
48
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
49
iscsi_set_events(iscsilun);
50
-
51
-out:
52
- qemu_mutex_unlock(&iscsilun->mutex);
53
}
35
}
54
36
55
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
37
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
56
diff --git a/block/nfs.c b/block/nfs.c
38
diff --git a/exec.c b/exec.c
57
index XXXXXXX..XXXXXXX 100644
39
index XXXXXXX..XXXXXXX 100644
58
--- a/block/nfs.c
40
--- a/exec.c
59
+++ b/block/nfs.c
41
+++ b/exec.c
60
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
42
@@ -XXX,XX +XXX,XX @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
61
nfs_co_init_task(bs, &task);
43
* Otherwise no-op.
62
task.iov = iov;
44
* @Note: this is supposed to be a synchronous op.
63
45
*/
64
- qemu_mutex_lock(&client->mutex);
46
-void qemu_ram_writeback(RAMBlock *block, ram_addr_t start, ram_addr_t length)
65
- if (nfs_pread_async(client->context, client->fh,
47
+void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
66
- offset, bytes, nfs_co_generic_cb, &task) != 0) {
48
{
67
- qemu_mutex_unlock(&client->mutex);
49
/* The requested range should fit in within the block range */
68
- return -ENOMEM;
50
g_assert((start + length) <= block->used_length);
69
- }
51
diff --git a/memory.c b/memory.c
70
+ WITH_QEMU_LOCK_GUARD(&client->mutex) {
71
+ if (nfs_pread_async(client->context, client->fh,
72
+ offset, bytes, nfs_co_generic_cb, &task) != 0) {
73
+ return -ENOMEM;
74
+ }
75
76
- nfs_set_events(client);
77
- qemu_mutex_unlock(&client->mutex);
78
+ nfs_set_events(client);
79
+ }
80
while (!task.complete) {
81
qemu_coroutine_yield();
82
}
83
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
84
buf = iov->iov[0].iov_base;
85
}
86
87
- qemu_mutex_lock(&client->mutex);
88
- if (nfs_pwrite_async(client->context, client->fh,
89
- offset, bytes, buf,
90
- nfs_co_generic_cb, &task) != 0) {
91
- qemu_mutex_unlock(&client->mutex);
92
- if (my_buffer) {
93
- g_free(buf);
94
+ WITH_QEMU_LOCK_GUARD(&client->mutex) {
95
+ if (nfs_pwrite_async(client->context, client->fh,
96
+ offset, bytes, buf,
97
+ nfs_co_generic_cb, &task) != 0) {
98
+ if (my_buffer) {
99
+ g_free(buf);
100
+ }
101
+ return -ENOMEM;
102
}
103
- return -ENOMEM;
104
- }
105
106
- nfs_set_events(client);
107
- qemu_mutex_unlock(&client->mutex);
108
+ nfs_set_events(client);
109
+ }
110
while (!task.complete) {
111
qemu_coroutine_yield();
112
}
113
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
114
115
nfs_co_init_task(bs, &task);
116
117
- qemu_mutex_lock(&client->mutex);
118
- if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
119
- &task) != 0) {
120
- qemu_mutex_unlock(&client->mutex);
121
- return -ENOMEM;
122
- }
123
+ WITH_QEMU_LOCK_GUARD(&client->mutex) {
124
+ if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
125
+ &task) != 0) {
126
+ return -ENOMEM;
127
+ }
128
129
- nfs_set_events(client);
130
- qemu_mutex_unlock(&client->mutex);
131
+ nfs_set_events(client);
132
+ }
133
while (!task.complete) {
134
qemu_coroutine_yield();
135
}
136
diff --git a/cpus-common.c b/cpus-common.c
137
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
138
--- a/cpus-common.c
53
--- a/memory.c
139
+++ b/cpus-common.c
54
+++ b/memory.c
140
@@ -XXX,XX +XXX,XX @@
55
@@ -XXX,XX +XXX,XX @@ void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp
141
#include "exec/cpu-common.h"
56
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
142
#include "hw/core/cpu.h"
143
#include "sysemu/cpus.h"
144
+#include "qemu/lockable.h"
145
146
static QemuMutex qemu_cpu_list_lock;
147
static QemuCond exclusive_cond;
148
@@ -XXX,XX +XXX,XX @@ static int cpu_get_free_index(void)
149
150
void cpu_list_add(CPUState *cpu)
151
{
57
{
152
- qemu_mutex_lock(&qemu_cpu_list_lock);
58
if (mr->ram_block) {
153
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
59
- qemu_ram_writeback(mr->ram_block, addr, size);
154
if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
60
+ qemu_ram_msync(mr->ram_block, addr, size);
155
cpu->cpu_index = cpu_get_free_index();
156
assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
157
@@ -XXX,XX +XXX,XX @@ void cpu_list_add(CPUState *cpu)
158
assert(!cpu_index_auto_assigned);
159
}
160
QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
161
- qemu_mutex_unlock(&qemu_cpu_list_lock);
162
}
163
164
void cpu_list_remove(CPUState *cpu)
165
{
166
- qemu_mutex_lock(&qemu_cpu_list_lock);
167
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
168
if (!QTAILQ_IN_USE(cpu, node)) {
169
/* there is nothing to undo since cpu_exec_init() hasn't been called */
170
- qemu_mutex_unlock(&qemu_cpu_list_lock);
171
return;
172
}
173
174
@@ -XXX,XX +XXX,XX @@ void cpu_list_remove(CPUState *cpu)
175
176
QTAILQ_REMOVE_RCU(&cpus, cpu, node);
177
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
178
- qemu_mutex_unlock(&qemu_cpu_list_lock);
179
}
180
181
struct qemu_work_item {
182
@@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu)
183
* see cpu->running == true, and it will kick the CPU.
184
*/
185
if (unlikely(atomic_read(&pending_cpus))) {
186
- qemu_mutex_lock(&qemu_cpu_list_lock);
187
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
188
if (!cpu->has_waiter) {
189
/* Not counted in pending_cpus, let the exclusive item
190
* run. Since we have the lock, just set cpu->running to true
191
@@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu)
192
* waiter at cpu_exec_end.
193
*/
194
}
195
- qemu_mutex_unlock(&qemu_cpu_list_lock);
196
}
61
}
197
}
62
}
198
63
199
@@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu)
200
* next cpu_exec_start.
201
*/
202
if (unlikely(atomic_read(&pending_cpus))) {
203
- qemu_mutex_lock(&qemu_cpu_list_lock);
204
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
205
if (cpu->has_waiter) {
206
cpu->has_waiter = false;
207
atomic_set(&pending_cpus, pending_cpus - 1);
208
@@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu)
209
qemu_cond_signal(&exclusive_cond);
210
}
211
}
212
- qemu_mutex_unlock(&qemu_cpu_list_lock);
213
}
214
}
215
216
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
217
index XXXXXXX..XXXXXXX 100644
218
--- a/hw/display/qxl.c
219
+++ b/hw/display/qxl.c
220
@@ -XXX,XX +XXX,XX @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
221
cmd->u.surface_create.stride);
222
return 1;
223
}
224
- qemu_mutex_lock(&qxl->track_lock);
225
- if (cmd->type == QXL_SURFACE_CMD_CREATE) {
226
- qxl->guest_surfaces.cmds[id] = ext->cmd.data;
227
- qxl->guest_surfaces.count++;
228
- if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
229
- qxl->guest_surfaces.max = qxl->guest_surfaces.count;
230
+ WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
231
+ if (cmd->type == QXL_SURFACE_CMD_CREATE) {
232
+ qxl->guest_surfaces.cmds[id] = ext->cmd.data;
233
+ qxl->guest_surfaces.count++;
234
+ if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
235
+ qxl->guest_surfaces.max = qxl->guest_surfaces.count;
236
+ }
237
+ }
238
+ if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
239
+ qxl->guest_surfaces.cmds[id] = 0;
240
+ qxl->guest_surfaces.count--;
241
+ }
242
}
243
- if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
244
- qxl->guest_surfaces.cmds[id] = 0;
245
- qxl->guest_surfaces.count--;
246
- }
247
- qemu_mutex_unlock(&qxl->track_lock);
248
break;
249
}
250
case QXL_CMD_CURSOR:
251
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
252
int i;
253
int qxl_i;
254
255
- qemu_mutex_lock(&qxl->ssd.lock);
256
+ QEMU_LOCK_GUARD(&qxl->ssd.lock);
257
if (surface_id != 0 || !num_updated_rects ||
258
!qxl->render_update_cookie_num) {
259
- qemu_mutex_unlock(&qxl->ssd.lock);
260
return;
261
}
262
trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
263
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
264
* Don't bother copying or scheduling the bh since we will flip
265
* the whole area anyway on completion of the update_area async call
266
*/
267
- qemu_mutex_unlock(&qxl->ssd.lock);
268
return;
269
}
270
qxl_i = qxl->num_dirty_rects;
271
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
272
trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
273
qxl->num_dirty_rects);
274
qemu_bh_schedule(qxl->update_area_bh);
275
- qemu_mutex_unlock(&qxl->ssd.lock);
276
}
277
278
/* called from spice server thread context only */
279
@@ -XXX,XX +XXX,XX @@ static void ioport_write(void *opaque, hwaddr addr,
280
case QXL_IO_MONITORS_CONFIG_ASYNC:
281
async_common:
282
async = QXL_ASYNC;
283
- qemu_mutex_lock(&d->async_lock);
284
- if (d->current_async != QXL_UNDEFINED_IO) {
285
- qxl_set_guest_bug(d, "%d async started before last (%d) complete",
286
- io_port, d->current_async);
287
- qemu_mutex_unlock(&d->async_lock);
288
- return;
289
+ WITH_QEMU_LOCK_GUARD(&d->async_lock) {
290
+ if (d->current_async != QXL_UNDEFINED_IO) {
291
+ qxl_set_guest_bug(d, "%d async started before last (%d) complete",
292
+ io_port, d->current_async);
293
+ return;
294
+ }
295
+ d->current_async = orig_io_port;
296
}
297
- d->current_async = orig_io_port;
298
- qemu_mutex_unlock(&d->async_lock);
299
break;
300
default:
301
break;
302
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
303
index XXXXXXX..XXXXXXX 100644
304
--- a/hw/vfio/platform.c
305
+++ b/hw/vfio/platform.c
306
@@ -XXX,XX +XXX,XX @@
307
#include "hw/vfio/vfio-platform.h"
308
#include "migration/vmstate.h"
309
#include "qemu/error-report.h"
310
+#include "qemu/lockable.h"
311
#include "qemu/main-loop.h"
312
#include "qemu/module.h"
313
#include "qemu/range.h"
314
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
315
VFIOPlatformDevice *vdev = intp->vdev;
316
bool delay_handling = false;
317
318
- qemu_mutex_lock(&vdev->intp_mutex);
319
+ QEMU_LOCK_GUARD(&vdev->intp_mutex);
320
if (intp->state == VFIO_IRQ_INACTIVE) {
321
QLIST_FOREACH(tmp, &vdev->intp_list, next) {
322
if (tmp->state == VFIO_IRQ_ACTIVE ||
323
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
324
QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
325
intp, pqnext);
326
ret = event_notifier_test_and_clear(intp->interrupt);
327
- qemu_mutex_unlock(&vdev->intp_mutex);
328
return;
329
}
330
331
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
332
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
333
vdev->mmap_timeout);
334
}
335
- qemu_mutex_unlock(&vdev->intp_mutex);
336
}
337
338
/**
339
diff --git a/migration/migration.c b/migration/migration.c
340
index XXXXXXX..XXXXXXX 100644
341
--- a/migration/migration.c
342
+++ b/migration/migration.c
343
@@ -XXX,XX +XXX,XX @@ static void migrate_fd_cleanup_bh(void *opaque)
344
345
void migrate_set_error(MigrationState *s, const Error *error)
346
{
347
- qemu_mutex_lock(&s->error_mutex);
348
+ QEMU_LOCK_GUARD(&s->error_mutex);
349
if (!s->error) {
350
s->error = error_copy(error);
351
}
352
- qemu_mutex_unlock(&s->error_mutex);
353
}
354
355
void migrate_fd_error(MigrationState *s, const Error *error)
356
diff --git a/migration/multifd.c b/migration/multifd.c
357
index XXXXXXX..XXXXXXX 100644
358
--- a/migration/multifd.c
359
+++ b/migration/multifd.c
360
@@ -XXX,XX +XXX,XX @@ void multifd_recv_sync_main(void)
361
for (i = 0; i < migrate_multifd_channels(); i++) {
362
MultiFDRecvParams *p = &multifd_recv_state->params[i];
363
364
- qemu_mutex_lock(&p->mutex);
365
- if (multifd_recv_state->packet_num < p->packet_num) {
366
- multifd_recv_state->packet_num = p->packet_num;
367
+ WITH_QEMU_LOCK_GUARD(&p->mutex) {
368
+ if (multifd_recv_state->packet_num < p->packet_num) {
369
+ multifd_recv_state->packet_num = p->packet_num;
370
+ }
371
}
372
- qemu_mutex_unlock(&p->mutex);
373
trace_multifd_recv_sync_main_signal(p->id);
374
qemu_sem_post(&p->sem_sync);
375
}
376
diff --git a/migration/ram.c b/migration/ram.c
377
index XXXXXXX..XXXXXXX 100644
378
--- a/migration/ram.c
379
+++ b/migration/ram.c
380
@@ -XXX,XX +XXX,XX @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
381
return NULL;
382
}
383
384
- qemu_mutex_lock(&rs->src_page_req_mutex);
385
+ QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
386
if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
387
struct RAMSrcPageRequest *entry =
388
QSIMPLEQ_FIRST(&rs->src_page_requests);
389
@@ -XXX,XX +XXX,XX @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
390
migration_consume_urgent_request();
391
}
392
}
393
- qemu_mutex_unlock(&rs->src_page_req_mutex);
394
395
return block;
396
}
397
diff --git a/monitor/misc.c b/monitor/misc.c
398
index XXXXXXX..XXXXXXX 100644
399
--- a/monitor/misc.c
400
+++ b/monitor/misc.c
401
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
402
MonFdsetFd *mon_fdset_fd;
403
AddfdInfo *fdinfo;
404
405
- qemu_mutex_lock(&mon_fdsets_lock);
406
+ QEMU_LOCK_GUARD(&mon_fdsets_lock);
407
if (has_fdset_id) {
408
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
409
/* Break if match found or match impossible due to ordering by ID */
410
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
411
if (fdset_id < 0) {
412
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
413
"a non-negative value");
414
- qemu_mutex_unlock(&mon_fdsets_lock);
415
return NULL;
416
}
417
/* Use specified fdset ID */
418
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
419
fdinfo->fdset_id = mon_fdset->id;
420
fdinfo->fd = mon_fdset_fd->fd;
421
422
- qemu_mutex_unlock(&mon_fdsets_lock);
423
return fdinfo;
424
}
425
426
diff --git a/ui/spice-display.c b/ui/spice-display.c
427
index XXXXXXX..XXXXXXX 100644
428
--- a/ui/spice-display.c
429
+++ b/ui/spice-display.c
430
@@ -XXX,XX +XXX,XX @@
431
#include "qemu/osdep.h"
432
#include "ui/qemu-spice.h"
433
#include "qemu/timer.h"
434
+#include "qemu/lockable.h"
435
#include "qemu/main-loop.h"
436
#include "qemu/option.h"
437
#include "qemu/queue.h"
438
@@ -XXX,XX +XXX,XX @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
439
{
440
graphic_hw_update(ssd->dcl.con);
441
442
- qemu_mutex_lock(&ssd->lock);
443
- if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
444
- qemu_spice_create_update(ssd);
445
- ssd->notify++;
446
+ WITH_QEMU_LOCK_GUARD(&ssd->lock) {
447
+ if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
448
+ qemu_spice_create_update(ssd);
449
+ ssd->notify++;
450
+ }
451
}
452
- qemu_mutex_unlock(&ssd->lock);
453
454
trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
455
if (ssd->notify) {
456
@@ -XXX,XX +XXX,XX @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
457
SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
458
int ret;
459
460
- qemu_mutex_lock(&ssd->lock);
461
+ QEMU_LOCK_GUARD(&ssd->lock);
462
if (ssd->ptr_define) {
463
*ext = ssd->ptr_define->ext;
464
ssd->ptr_define = NULL;
465
@@ -XXX,XX +XXX,XX @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
466
} else {
467
ret = false;
468
}
469
- qemu_mutex_unlock(&ssd->lock);
470
return ret;
471
}
472
473
diff --git a/util/log.c b/util/log.c
474
index XXXXXXX..XXXXXXX 100644
475
--- a/util/log.c
476
+++ b/util/log.c
477
@@ -XXX,XX +XXX,XX @@
478
#include "qemu/cutils.h"
479
#include "trace/control.h"
480
#include "qemu/thread.h"
481
+#include "qemu/lockable.h"
482
483
static char *logfilename;
484
static QemuMutex qemu_logfile_mutex;
485
@@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags)
486
if (qemu_loglevel && (!is_daemonized() || logfilename)) {
487
need_to_open_file = true;
488
}
489
- qemu_mutex_lock(&qemu_logfile_mutex);
490
+ QEMU_LOCK_GUARD(&qemu_logfile_mutex);
491
if (qemu_logfile && !need_to_open_file) {
492
logfile = qemu_logfile;
493
atomic_rcu_set(&qemu_logfile, NULL);
494
@@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags)
495
}
496
atomic_rcu_set(&qemu_logfile, logfile);
497
}
498
- qemu_mutex_unlock(&qemu_logfile_mutex);
499
}
500
501
void qemu_log_needs_buffers(void)
502
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
503
index XXXXXXX..XXXXXXX 100644
504
--- a/util/qemu-timer.c
505
+++ b/util/qemu-timer.c
506
@@ -XXX,XX +XXX,XX @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
507
QEMUTimerList *timer_list = ts->timer_list;
508
bool rearm;
509
510
- qemu_mutex_lock(&timer_list->active_timers_lock);
511
- if (ts->expire_time == -1 || ts->expire_time > expire_time) {
512
- if (ts->expire_time != -1) {
513
- timer_del_locked(timer_list, ts);
514
+ WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
515
+ if (ts->expire_time == -1 || ts->expire_time > expire_time) {
516
+ if (ts->expire_time != -1) {
517
+ timer_del_locked(timer_list, ts);
518
+ }
519
+ rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
520
+ } else {
521
+ rearm = false;
522
}
523
- rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
524
- } else {
525
- rearm = false;
526
}
527
- qemu_mutex_unlock(&timer_list->active_timers_lock);
528
-
529
if (rearm) {
530
timerlist_rearm(timer_list);
531
}
532
diff --git a/util/rcu.c b/util/rcu.c
533
index XXXXXXX..XXXXXXX 100644
534
--- a/util/rcu.c
535
+++ b/util/rcu.c
536
@@ -XXX,XX +XXX,XX @@
537
#include "qemu/atomic.h"
538
#include "qemu/thread.h"
539
#include "qemu/main-loop.h"
540
+#include "qemu/lockable.h"
541
#if defined(CONFIG_MALLOC_TRIM)
542
#include <malloc.h>
543
#endif
544
@@ -XXX,XX +XXX,XX @@ static void wait_for_readers(void)
545
546
void synchronize_rcu(void)
547
{
548
- qemu_mutex_lock(&rcu_sync_lock);
549
+ QEMU_LOCK_GUARD(&rcu_sync_lock);
550
551
/* Write RCU-protected pointers before reading p_rcu_reader->ctr.
552
* Pairs with smp_mb_placeholder() in rcu_read_lock().
553
*/
554
smp_mb_global();
555
556
- qemu_mutex_lock(&rcu_registry_lock);
557
+ QEMU_LOCK_GUARD(&rcu_registry_lock);
558
if (!QLIST_EMPTY(&registry)) {
559
/* In either case, the atomic_mb_set below blocks stores that free
560
* old RCU-protected pointers.
561
@@ -XXX,XX +XXX,XX @@ void synchronize_rcu(void)
562
563
wait_for_readers();
564
}
565
-
566
- qemu_mutex_unlock(&rcu_registry_lock);
567
- qemu_mutex_unlock(&rcu_sync_lock);
568
}
569
570
571
diff --git a/util/thread-pool.c b/util/thread-pool.c
572
index XXXXXXX..XXXXXXX 100644
573
--- a/util/thread-pool.c
574
+++ b/util/thread-pool.c
575
@@ -XXX,XX +XXX,XX @@ static void thread_pool_cancel(BlockAIOCB *acb)
576
577
trace_thread_pool_cancel(elem, elem->common.opaque);
578
579
- qemu_mutex_lock(&pool->lock);
580
+ QEMU_LOCK_GUARD(&pool->lock);
581
if (elem->state == THREAD_QUEUED &&
582
/* No thread has yet started working on elem. we can try to "steal"
583
* the item from the worker if we can get a signal from the
584
@@ -XXX,XX +XXX,XX @@ static void thread_pool_cancel(BlockAIOCB *acb)
585
elem->ret = -ECANCELED;
586
}
587
588
- qemu_mutex_unlock(&pool->lock);
589
}
590
591
static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
592
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
593
index XXXXXXX..XXXXXXX 100644
594
--- a/util/vfio-helpers.c
595
+++ b/util/vfio-helpers.c
596
@@ -XXX,XX +XXX,XX @@
597
#include "standard-headers/linux/pci_regs.h"
598
#include "qemu/event_notifier.h"
599
#include "qemu/vfio-helpers.h"
600
+#include "qemu/lockable.h"
601
#include "trace.h"
602
603
#define QEMU_VFIO_DEBUG 0
604
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
605
.size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
606
};
607
trace_qemu_vfio_dma_reset_temporary(s);
608
- qemu_mutex_lock(&s->lock);
609
+ QEMU_LOCK_GUARD(&s->lock);
610
if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
611
error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
612
- qemu_mutex_unlock(&s->lock);
613
return -errno;
614
}
615
s->high_water_mark = QEMU_VFIO_IOVA_MAX;
616
- qemu_mutex_unlock(&s->lock);
617
return 0;
618
}
619
620
--
64
--
621
2.25.3
65
2.25.4
622
66
diff view generated by jsdifflib
1
From: Simran Singhal <singhalsimran0@gmail.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Replace manual lock()/unlock() calls with lock guard macros
3
We have a few bdrv_*() functions that can either spawn a new coroutine
4
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
4
and wait for it with BDRV_POLL_WHILE() or use a fastpath if they are
5
5
alreeady running in a coroutine. All of them duplicate basically the
6
Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
6
same code.
7
Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
7
8
Reviewed-by: Marcel Apfelbaum<marcel.apfelbaum@gmail.com>
8
Factor the common code into a new function bdrv_run_co().
9
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
9
10
Message-id: 20200402065035.GA15477@simran-Inspiron-5558
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
11
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
12
Message-id: 20200520144901.16589-1-vsementsov@virtuozzo.com
13
[Factor out bdrv_run_co_entry too]
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
15
---
13
hw/hyperv/hyperv.c | 15 ++++++-------
16
block/io.c | 193 ++++++++++++++++++++---------------------------------
14
hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
17
1 file changed, 72 insertions(+), 121 deletions(-)
15
hw/rdma/rdma_rm.c | 3 +--
18
16
3 files changed, 33 insertions(+), 35 deletions(-)
19
diff --git a/block/io.c b/block/io.c
17
18
diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
19
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/hyperv/hyperv.c
21
--- a/block/io.c
21
+++ b/hw/hyperv/hyperv.c
22
+++ b/block/io.c
22
@@ -XXX,XX +XXX,XX @@
23
@@ -XXX,XX +XXX,XX @@
23
#include "sysemu/kvm.h"
24
#include "qemu/main-loop.h"
24
#include "qemu/bitops.h"
25
#include "sysemu/replay.h"
25
#include "qemu/error-report.h"
26
26
+#include "qemu/lockable.h"
27
-#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
27
#include "qemu/queue.h"
28
-
28
#include "qemu/rcu.h"
29
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
29
#include "qemu/rcu_queue.h"
30
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
30
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
31
31
int ret;
32
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
32
MsgHandler *mh;
33
return 0;
33
34
}
34
- qemu_mutex_lock(&handlers_mutex);
35
35
+ QEMU_LOCK_GUARD(&handlers_mutex);
36
+typedef int coroutine_fn BdrvRequestEntry(void *opaque);
36
QLIST_FOREACH(mh, &msg_handlers, link) {
37
+typedef struct BdrvRunCo {
37
if (mh->conn_id == conn_id) {
38
+ BdrvRequestEntry *entry;
38
if (handler) {
39
+ void *opaque;
39
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
40
+ int ret;
40
g_free_rcu(mh, rcu);
41
+ bool done;
41
ret = 0;
42
+ Coroutine *co; /* Coroutine, running bdrv_run_co_entry, for debugging */
42
}
43
+} BdrvRunCo;
43
- goto unlock;
44
+
44
+ return ret;
45
+static void coroutine_fn bdrv_run_co_entry(void *opaque)
45
}
46
+{
47
+ BdrvRunCo *arg = opaque;
48
+
49
+ arg->ret = arg->entry(arg->opaque);
50
+ arg->done = true;
51
+ aio_wait_kick();
52
+}
53
+
54
+static int bdrv_run_co(BlockDriverState *bs, BdrvRequestEntry *entry,
55
+ void *opaque)
56
+{
57
+ if (qemu_in_coroutine()) {
58
+ /* Fast-path if already in coroutine context */
59
+ return entry(opaque);
60
+ } else {
61
+ BdrvRunCo s = { .entry = entry, .opaque = opaque };
62
+
63
+ s.co = qemu_coroutine_create(bdrv_run_co_entry, &s);
64
+ bdrv_coroutine_enter(bs, s.co);
65
+
66
+ BDRV_POLL_WHILE(bs, !s.done);
67
+
68
+ return s.ret;
69
+ }
70
+}
71
+
72
typedef struct RwCo {
73
BdrvChild *child;
74
int64_t offset;
75
QEMUIOVector *qiov;
76
bool is_write;
77
- int ret;
78
BdrvRequestFlags flags;
79
} RwCo;
80
81
-static void coroutine_fn bdrv_rw_co_entry(void *opaque)
82
+static int coroutine_fn bdrv_rw_co_entry(void *opaque)
83
{
84
RwCo *rwco = opaque;
85
86
if (!rwco->is_write) {
87
- rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
88
- rwco->qiov->size, rwco->qiov,
89
- rwco->flags);
90
+ return bdrv_co_preadv(rwco->child, rwco->offset,
91
+ rwco->qiov->size, rwco->qiov,
92
+ rwco->flags);
93
} else {
94
- rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
95
- rwco->qiov->size, rwco->qiov,
96
- rwco->flags);
97
+ return bdrv_co_pwritev(rwco->child, rwco->offset,
98
+ rwco->qiov->size, rwco->qiov,
99
+ rwco->flags);
46
}
100
}
47
101
- aio_wait_kick();
48
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
102
}
49
} else {
103
50
ret = -ENOENT;
104
/*
51
}
105
@@ -XXX,XX +XXX,XX @@ static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
52
-unlock:
106
QEMUIOVector *qiov, bool is_write,
53
- qemu_mutex_unlock(&handlers_mutex);
107
BdrvRequestFlags flags)
54
+
108
{
109
- Coroutine *co;
110
RwCo rwco = {
111
.child = child,
112
.offset = offset,
113
.qiov = qiov,
114
.is_write = is_write,
115
- .ret = NOT_DONE,
116
.flags = flags,
117
};
118
119
- if (qemu_in_coroutine()) {
120
- /* Fast-path if already in coroutine context */
121
- bdrv_rw_co_entry(&rwco);
122
- } else {
123
- co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
124
- bdrv_coroutine_enter(child->bs, co);
125
- BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
126
- }
127
- return rwco.ret;
128
+ return bdrv_run_co(child->bs, bdrv_rw_co_entry, &rwco);
129
}
130
131
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
132
@@ -XXX,XX +XXX,XX @@ typedef struct BdrvCoBlockStatusData {
133
int64_t *pnum;
134
int64_t *map;
135
BlockDriverState **file;
136
- int ret;
137
- bool done;
138
} BdrvCoBlockStatusData;
139
140
int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
141
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
142
}
143
144
/* Coroutine wrapper for bdrv_block_status_above() */
145
-static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
146
+static int coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
147
{
148
BdrvCoBlockStatusData *data = opaque;
149
150
- data->ret = bdrv_co_block_status_above(data->bs, data->base,
151
- data->want_zero,
152
- data->offset, data->bytes,
153
- data->pnum, data->map, data->file);
154
- data->done = true;
155
- aio_wait_kick();
156
+ return bdrv_co_block_status_above(data->bs, data->base,
157
+ data->want_zero,
158
+ data->offset, data->bytes,
159
+ data->pnum, data->map, data->file);
160
}
161
162
/*
163
@@ -XXX,XX +XXX,XX @@ static int bdrv_common_block_status_above(BlockDriverState *bs,
164
int64_t *map,
165
BlockDriverState **file)
166
{
167
- Coroutine *co;
168
BdrvCoBlockStatusData data = {
169
.bs = bs,
170
.base = base,
171
@@ -XXX,XX +XXX,XX @@ static int bdrv_common_block_status_above(BlockDriverState *bs,
172
.pnum = pnum,
173
.map = map,
174
.file = file,
175
- .done = false,
176
};
177
178
- if (qemu_in_coroutine()) {
179
- /* Fast-path if already in coroutine context */
180
- bdrv_block_status_above_co_entry(&data);
181
- } else {
182
- co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
183
- bdrv_coroutine_enter(bs, co);
184
- BDRV_POLL_WHILE(bs, !data.done);
185
- }
186
- return data.ret;
187
+ return bdrv_run_co(bs, bdrv_block_status_above_co_entry, &data);
188
}
189
190
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
191
@@ -XXX,XX +XXX,XX @@ typedef struct BdrvVmstateCo {
192
QEMUIOVector *qiov;
193
int64_t pos;
194
bool is_read;
195
- int ret;
196
} BdrvVmstateCo;
197
198
static int coroutine_fn
199
@@ -XXX,XX +XXX,XX @@ bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
55
return ret;
200
return ret;
56
}
201
}
57
202
58
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
203
-static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
59
int ret;
204
+static int coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
60
EventFlagHandler *handler;
205
{
61
206
BdrvVmstateCo *co = opaque;
62
- qemu_mutex_lock(&handlers_mutex);
207
- co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
63
+ QEMU_LOCK_GUARD(&handlers_mutex);
208
- aio_wait_kick();
64
QLIST_FOREACH(handler, &event_flag_handlers, link) {
209
+
65
if (handler->conn_id == conn_id) {
210
+ return bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
66
if (notifier) {
211
}
67
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
212
68
g_free_rcu(handler, rcu);
213
static inline int
69
ret = 0;
214
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
70
}
215
bool is_read)
71
- goto unlock;
216
{
72
+ return ret;
217
- if (qemu_in_coroutine()) {
73
}
218
- return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
74
}
219
- } else {
75
220
- BdrvVmstateCo data = {
76
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
221
- .bs = bs,
77
} else {
222
- .qiov = qiov,
78
ret = -ENOENT;
223
- .pos = pos,
79
}
224
- .is_read = is_read,
80
-unlock:
225
- .ret = -EINPROGRESS,
81
- qemu_mutex_unlock(&handlers_mutex);
226
- };
82
+
227
- Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
83
return ret;
228
+ BdrvVmstateCo data = {
84
}
229
+ .bs = bs,
85
230
+ .qiov = qiov,
86
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
231
+ .pos = pos,
87
index XXXXXXX..XXXXXXX 100644
232
+ .is_read = is_read,
88
--- a/hw/rdma/rdma_backend.c
233
+ };
89
+++ b/hw/rdma/rdma_backend.c
234
90
@@ -XXX,XX +XXX,XX @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
235
- bdrv_coroutine_enter(bs, co);
91
struct ibv_wc wc[2];
236
- BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
92
RdmaProtectedGSList *cqe_ctx_list;
237
- return data.ret;
93
238
- }
94
- qemu_mutex_lock(&rdma_dev_res->lock);
239
+ return bdrv_run_co(bs, bdrv_co_rw_vmstate_entry, &data);
95
- do {
240
}
96
- ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
241
97
+ WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
242
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
98
+ do {
243
@@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel_async(BlockAIOCB *acb)
99
+ ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
244
/**************************************************************/
100
245
/* Coroutine block device emulation */
101
- trace_rdma_poll_cq(ne, ibcq);
246
102
+ trace_rdma_poll_cq(ne, ibcq);
247
-typedef struct FlushCo {
103
248
- BlockDriverState *bs;
104
- for (i = 0; i < ne; i++) {
249
- int ret;
105
- bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
250
-} FlushCo;
106
- if (unlikely(!bctx)) {
251
-
107
- rdma_error_report("No matching ctx for req %"PRId64,
252
-
108
- wc[i].wr_id);
253
-static void coroutine_fn bdrv_flush_co_entry(void *opaque)
109
- continue;
254
+static int coroutine_fn bdrv_flush_co_entry(void *opaque)
110
- }
255
{
111
+ for (i = 0; i < ne; i++) {
256
- FlushCo *rwco = opaque;
112
+ bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
257
-
113
+ if (unlikely(!bctx)) {
258
- rwco->ret = bdrv_co_flush(rwco->bs);
114
+ rdma_error_report("No matching ctx for req %"PRId64,
259
- aio_wait_kick();
115
+ wc[i].wr_id);
260
+ return bdrv_co_flush(opaque);
116
+ continue;
261
}
117
+ }
262
118
263
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
119
- comp_handler(bctx->up_ctx, &wc[i]);
264
@@ -XXX,XX +XXX,XX @@ early_exit:
120
+ comp_handler(bctx->up_ctx, &wc[i]);
265
121
266
int bdrv_flush(BlockDriverState *bs)
122
- if (bctx->backend_qp) {
267
{
123
- cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
268
- Coroutine *co;
124
- } else {
269
- FlushCo flush_co = {
125
- cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
270
- .bs = bs,
126
- }
271
- .ret = NOT_DONE,
127
+ if (bctx->backend_qp) {
272
- };
128
+ cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
273
-
129
+ } else {
274
- if (qemu_in_coroutine()) {
130
+ cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
275
- /* Fast-path if already in coroutine context */
131
+ }
276
- bdrv_flush_co_entry(&flush_co);
132
277
- } else {
133
- rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
278
- co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
134
- rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
279
- bdrv_coroutine_enter(bs, co);
135
- g_free(bctx);
280
- BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
136
- }
281
- }
137
- total_ne += ne;
282
-
138
- } while (ne > 0);
283
- return flush_co.ret;
139
- atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
284
+ return bdrv_run_co(bs, bdrv_flush_co_entry, bs);
140
- qemu_mutex_unlock(&rdma_dev_res->lock);
285
}
141
+ rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
286
142
+ rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
287
typedef struct DiscardCo {
143
+ g_free(bctx);
288
BdrvChild *child;
144
+ }
289
int64_t offset;
145
+ total_ne += ne;
290
int64_t bytes;
146
+ } while (ne > 0);
291
- int ret;
147
+ atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
292
} DiscardCo;
148
+ }
293
-static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
149
294
+
150
if (ne < 0) {
295
+static int coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
151
rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
296
{
152
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
297
DiscardCo *rwco = opaque;
153
index XXXXXXX..XXXXXXX 100644
298
154
--- a/hw/rdma/rdma_rm.c
299
- rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
155
+++ b/hw/rdma/rdma_rm.c
300
- aio_wait_kick();
156
@@ -XXX,XX +XXX,XX @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
301
+ return bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
157
{
302
}
158
trace_rdma_res_tbl_dealloc(tbl->name, handle);
303
159
304
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
160
- qemu_mutex_lock(&tbl->lock);
305
@@ -XXX,XX +XXX,XX @@ out:
161
+ QEMU_LOCK_GUARD(&tbl->lock);
306
162
307
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
163
if (handle < tbl->tbl_sz) {
308
{
164
clear_bit(handle, tbl->bitmap);
309
- Coroutine *co;
165
tbl->used--;
310
DiscardCo rwco = {
166
}
311
.child = child,
167
312
.offset = offset,
168
- qemu_mutex_unlock(&tbl->lock);
313
.bytes = bytes,
169
}
314
- .ret = NOT_DONE,
170
315
};
171
int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
316
317
- if (qemu_in_coroutine()) {
318
- /* Fast-path if already in coroutine context */
319
- bdrv_pdiscard_co_entry(&rwco);
320
- } else {
321
- co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
322
- bdrv_coroutine_enter(child->bs, co);
323
- BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
324
- }
325
-
326
- return rwco.ret;
327
+ return bdrv_run_co(child->bs, bdrv_pdiscard_co_entry, &rwco);
328
}
329
330
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
331
@@ -XXX,XX +XXX,XX @@ typedef struct TruncateCo {
332
PreallocMode prealloc;
333
BdrvRequestFlags flags;
334
Error **errp;
335
- int ret;
336
} TruncateCo;
337
338
-static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
339
+static int coroutine_fn bdrv_truncate_co_entry(void *opaque)
340
{
341
TruncateCo *tco = opaque;
342
- tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->exact,
343
- tco->prealloc, tco->flags, tco->errp);
344
- aio_wait_kick();
345
+
346
+ return bdrv_co_truncate(tco->child, tco->offset, tco->exact,
347
+ tco->prealloc, tco->flags, tco->errp);
348
}
349
350
int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
351
PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
352
{
353
- Coroutine *co;
354
TruncateCo tco = {
355
.child = child,
356
.offset = offset,
357
@@ -XXX,XX +XXX,XX @@ int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
358
.prealloc = prealloc,
359
.flags = flags,
360
.errp = errp,
361
- .ret = NOT_DONE,
362
};
363
364
- if (qemu_in_coroutine()) {
365
- /* Fast-path if already in coroutine context */
366
- bdrv_truncate_co_entry(&tco);
367
- } else {
368
- co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
369
- bdrv_coroutine_enter(child->bs, co);
370
- BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
371
- }
372
-
373
- return tco.ret;
374
+ return bdrv_run_co(child->bs, bdrv_truncate_co_entry, &tco);
375
}
172
--
376
--
173
2.25.3
377
2.25.4
174
378
diff view generated by jsdifflib