The following changes since commit 27c94566379069fb8930bb1433dcffbf7df3203d:

  Merge remote-tracking branch 'remotes/edgar/tags/edgar/xilinx-next-2020-04-30.for-upstream' into staging (2020-04-30 16:47:23 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to cc1adc4488059ac16d4d2772a7aa7cd1323deeca:

  lockable: Replace locks with lock guard macros (2020-05-01 09:19:25 +0100)

----------------------------------------------------------------
Pull request

Fix the QEMU_LOCK_GUARD() macros, use them more widely, and allow the fuzzer
target to be selected from argv[0].

----------------------------------------------------------------

Alexander Bulekov (1):
  fuzz: select fuzz target using executable name

Daniel Brodsky (2):
  lockable: fix __COUNTER__ macro to be referenced properly
  lockable: replaced locks with lock guard macros where appropriate

Simran Singhal (1):
  lockable: Replace locks with lock guard macros

 include/qemu/lockable.h |  7 +++---
 include/qemu/rcu.h      |  2 +-
 block/iscsi.c           |  7 ++----
 block/nfs.c             | 51 +++++++++++++++++++----------------------
 cpus-common.c           | 14 ++++-------
 hw/display/qxl.c        | 43 ++++++++++++++++------------------
 hw/hyperv/hyperv.c      | 15 ++++++------
 hw/rdma/rdma_backend.c  | 50 ++++++++++++++++++++--------------------
 hw/rdma/rdma_rm.c       |  3 +--
 hw/vfio/platform.c      |  5 ++--
 migration/migration.c   |  3 +--
 migration/multifd.c     |  8 +++----
 migration/ram.c         |  3 +--
 monitor/misc.c          |  4 +---
 tests/qtest/fuzz/fuzz.c | 19 ++++++++-------
 ui/spice-display.c      | 14 +++++------
 util/log.c              |  4 ++--
 util/qemu-timer.c       | 17 +++++++-------
 util/rcu.c              |  8 +++----
 util/thread-pool.c      |  3 +--
 util/vfio-helpers.c     |  5 ++--
 slirp                   |  2 +-
 22 files changed, 133 insertions(+), 154 deletions(-)

--
2.25.3

From: Alexander Bulekov <alxndr@bu.edu>

The fuzzers are built into a binary (e.g. qemu-fuzz-i386). To select the
device/fuzz target, we usually use the --fuzz-target= argument. This
commit allows the fuzz target to be specified using the name of the
executable instead: if the executable name ends with -target-FUZZ_TARGET,
the fuzz target is selected from that name rather than from the
--fuzz-target argument. This is useful for systems such as oss-fuzz
where we don't have control of the arguments passed to the fuzzer.

[Fixed incorrect indentation.
--Stefan]

Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Message-id: 20200421182230.6313-1-alxndr@bu.edu
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qtest/fuzz/fuzz.c | 19 +++++++++++--------
 slirp                   |  2 +-
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/qtest/fuzz/fuzz.c
+++ b/tests/qtest/fuzz/fuzz.c
@@ -XXX,XX +XXX,XX @@ static void usage(char *path)
         printf(" * %s : %s\n", tmp->target->name,
                tmp->target->description);
     }
+    printf("Alternatively, add -target-FUZZ_TARGET to the executable name\n");
     exit(0);
 }
 
@@ -XXX,XX +XXX,XX @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
     module_call_init(MODULE_INIT_QOM);
     module_call_init(MODULE_INIT_LIBQOS);
 
-    if (*argc <= 1) {
+    target_name = strstr(**argv, "-target-");
+    if (target_name) {        /* The binary name specifies the target */
+        target_name += strlen("-target-");
+    } else if (*argc > 1) {   /* The target is specified as an argument */
+        target_name = (*argv)[1];
+        if (!strstr(target_name, "--fuzz-target=")) {
+            usage(**argv);
+        }
+        target_name += strlen("--fuzz-target=");
+    } else {
         usage(**argv);
     }
 
     /* Identify the fuzz target */
-    target_name = (*argv)[1];
-    if (!strstr(target_name, "--fuzz-target=")) {
-        usage(**argv);
-    }
-
-    target_name += strlen("--fuzz-target=");
-
     fuzz_target = fuzz_get_target(target_name);
     if (!fuzz_target) {
         usage(**argv);
diff --git a/slirp b/slirp
index XXXXXXX..XXXXXXX 160000
--- a/slirp
+++ b/slirp
@@ -1 +1 @@
-Subproject commit 2faae0f778f818fadc873308f983289df697eb93
+Subproject commit 55ab21c9a36852915b81f1b41ebaf3b6509dd8ba
--
2.25.3
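
The string handling in this patch is easy to try in isolation. Below is a
minimal standalone sketch (a hypothetical demo program, not QEMU code) that
mirrors the selection order introduced above: the binary name is consulted
first, then the --fuzz-target= argument. As in the patch, the option is
assumed to prefix the first argument.

    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        /* e.g. a binary (or symlink) named demo-target-foo selects "foo" */
        const char *target_name = strstr(argv[0], "-target-");

        if (target_name) {          /* the binary name specifies the target */
            target_name += strlen("-target-");
        } else if (argc > 1 && strstr(argv[1], "--fuzz-target=")) {
            target_name = argv[1] + strlen("--fuzz-target=");
        } else {
            fprintf(stderr, "usage: %s --fuzz-target=NAME\n", argv[0]);
            return 1;
        }
        printf("selected fuzz target: %s\n", target_name);
        return 0;
    }

Renaming or symlinking the fuzzer binary is what lets an environment like
oss-fuzz, which controls the executable name but not the arguments, pick a
target.
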
From: Daniel Brodsky <dnbrdsky@gmail.com>

- __COUNTER__ doesn't work with ## concat
- replaced ## with glue() macro so __COUNTER__ is evaluated

Fixes: 3284c3ddc4
Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
Message-id: 20200404042108.389635-2-dnbrdsky@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/lockable.h | 7 ++++---
 include/qemu/rcu.h      | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/lockable.h
+++ b/include/qemu/lockable.h
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
  * }
  */
 #define WITH_QEMU_LOCK_GUARD(x) \
-    WITH_QEMU_LOCK_GUARD_((x), qemu_lockable_auto##__COUNTER__)
+    WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))
 
 /**
  * QEMU_LOCK_GUARD - Lock an object until the end of the scope
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
  *   return; <-- mutex is automatically unlocked
  * }
  */
-#define QEMU_LOCK_GUARD(x) \
-    g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
+#define QEMU_LOCK_GUARD(x) \
+    g_autoptr(QemuLockable) \
+    glue(qemu_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
         qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))
 
 #endif
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -XXX,XX +XXX,XX @@ static inline void rcu_read_auto_unlock(RCUReadAuto *r)
 G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
 
 #define WITH_RCU_READ_LOCK_GUARD() \
-    WITH_RCU_READ_LOCK_GUARD_(_rcu_read_auto##__COUNTER__)
+    WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))
 
 #define WITH_RCU_READ_LOCK_GUARD_(var) \
     for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
--
2.25.3
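
The preprocessor gotcha behind this fix is worth seeing on its own: the
operands of ## are pasted without being macro-expanded first, so
qemu_lockable_auto##__COUNTER__ produces the literal identifier
qemu_lockable_auto__COUNTER__ every time, and two guards in one function
collide. Routing the paste through a two-level glue() macro forces
__COUNTER__ to expand first. A minimal standalone demo (assuming a
GCC/Clang-style preprocessor; the DECLARE_* names are made up for
illustration):

    #include <stdio.h>

    #define xglue(a, b) a##b        /* pastes without expanding arguments */
    #define glue(a, b)  xglue(a, b) /* expands a and b, then pastes */

    #define DECLARE_BROKEN(name)  int name##__COUNTER__
    #define DECLARE_WORKING(name) int glue(name, __COUNTER__)

    DECLARE_WORKING(var);   /* expands to e.g. "int var0;" */
    DECLARE_WORKING(var);   /* expands to e.g. "int var1;" -- distinct */
    /*
     * DECLARE_BROKEN(var); DECLARE_BROKEN(var); would both expand to
     * "int var__COUNTER__;" -- a duplicate definition error, which is
     * the failure QEMU_LOCK_GUARD hit when used twice in one scope.
     */

    int main(void)
    {
        printf("__COUNTER__ is now %d\n", __COUNTER__);
        return 0;
    }

The patch also adds G_GNUC_UNUSED to the guard variable, since the generated
name is never referenced after its declaration.
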
From: Daniel Brodsky <dnbrdsky@gmail.com>

- ran regexp "qemu_mutex_lock\(.*\).*\n.*if" to find targets
- replaced result with QEMU_LOCK_GUARD if all unlocks at function end
- replaced result with WITH_QEMU_LOCK_GUARD if unlock not at end

Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Message-id: 20200404042108.389635-3-dnbrdsky@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/iscsi.c         |  7 ++----
 block/nfs.c           | 51 ++++++++++++++++++++-----------------------
 cpus-common.c         | 14 +++++-------
 hw/display/qxl.c      | 43 +++++++++++++++++-------------------
 hw/vfio/platform.c    |  5 ++---
 migration/migration.c |  3 +--
 migration/multifd.c   |  8 +++----
 migration/ram.c       |  3 +--
 monitor/misc.c        |  4 +---
 ui/spice-display.c    | 14 ++++++------
 util/log.c            |  4 ++--
 util/qemu-timer.c     | 17 +++++++--------
 util/rcu.c            |  8 +++----
 util/thread-pool.c    |  3 +--
 util/vfio-helpers.c   |  5 ++---
 15 files changed, 83 insertions(+), 106 deletions(-)

diff --git a/block/iscsi.c b/block/iscsi.c
index XXXXXXX..XXXXXXX 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
 {
     IscsiLun *iscsilun = opaque;
 
-    qemu_mutex_lock(&iscsilun->mutex);
+    QEMU_LOCK_GUARD(&iscsilun->mutex);
     if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
         error_report("iSCSI: NOP timeout. Reconnecting...");
         iscsilun->request_timed_out = true;
     } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
         error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
-        goto out;
+        return;
     }
 
     timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
     iscsi_set_events(iscsilun);
-
-out:
-    qemu_mutex_unlock(&iscsilun->mutex);
 }
 
 static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
diff --git a/block/nfs.c b/block/nfs.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
     nfs_co_init_task(bs, &task);
     task.iov = iov;
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pread_async(client->context, client->fh,
-                        offset, bytes, nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pread_async(client->context, client->fh,
+                            offset, bytes, nfs_co_generic_cb, &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
         buf = iov->iov[0].iov_base;
     }
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_pwrite_async(client->context, client->fh,
-                         offset, bytes, buf,
-                         nfs_co_generic_cb, &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        if (my_buffer) {
-            g_free(buf);
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_pwrite_async(client->context, client->fh,
+                             offset, bytes, buf,
+                             nfs_co_generic_cb, &task) != 0) {
+            if (my_buffer) {
+                g_free(buf);
+            }
+            return -ENOMEM;
         }
-        return -ENOMEM;
-    }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
 
     nfs_co_init_task(bs, &task);
 
-    qemu_mutex_lock(&client->mutex);
-    if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
-                        &task) != 0) {
-        qemu_mutex_unlock(&client->mutex);
-        return -ENOMEM;
-    }
+    WITH_QEMU_LOCK_GUARD(&client->mutex) {
+        if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
+                            &task) != 0) {
+            return -ENOMEM;
+        }
 
-    nfs_set_events(client);
-    qemu_mutex_unlock(&client->mutex);
+        nfs_set_events(client);
+    }
     while (!task.complete) {
         qemu_coroutine_yield();
     }
diff --git a/cpus-common.c b/cpus-common.c
index XXXXXXX..XXXXXXX 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/cpu-common.h"
 #include "hw/core/cpu.h"
 #include "sysemu/cpus.h"
+#include "qemu/lockable.h"
 
 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;
@@ -XXX,XX +XXX,XX @@ static int cpu_get_free_index(void)
 
 void cpu_list_add(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
         cpu->cpu_index = cpu_get_free_index();
         assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
@@ -XXX,XX +XXX,XX @@ void cpu_list_add(CPUState *cpu)
         assert(!cpu_index_auto_assigned);
     }
     QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 void cpu_list_remove(CPUState *cpu)
 {
-    qemu_mutex_lock(&qemu_cpu_list_lock);
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
     if (!QTAILQ_IN_USE(cpu, node)) {
         /* there is nothing to undo since cpu_exec_init() hasn't been called */
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ void cpu_list_remove(CPUState *cpu)
 
     QTAILQ_REMOVE_RCU(&cpus, cpu, node);
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
-    qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
 
 struct qemu_work_item {
@@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu)
      * see cpu->running == true, and it will kick the CPU.
      */
     if (unlikely(atomic_read(&pending_cpus))) {
-        qemu_mutex_lock(&qemu_cpu_list_lock);
+        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
         if (!cpu->has_waiter) {
             /* Not counted in pending_cpus, let the exclusive item
              * run.  Since we have the lock, just set cpu->running to true
@@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu)
              * waiter at cpu_exec_end.
              */
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu)
          * next cpu_exec_start.
          */
         if (unlikely(atomic_read(&pending_cpus))) {
-            qemu_mutex_lock(&qemu_cpu_list_lock);
+            QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
             if (cpu->has_waiter) {
                 cpu->has_waiter = false;
                 atomic_set(&pending_cpus, pending_cpus - 1);
@@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu)
                 qemu_cond_signal(&exclusive_cond);
             }
         }
-        qemu_mutex_unlock(&qemu_cpu_list_lock);
     }
 }
 
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -XXX,XX +XXX,XX @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
                               cmd->u.surface_create.stride);
             return 1;
         }
-        qemu_mutex_lock(&qxl->track_lock);
-        if (cmd->type == QXL_SURFACE_CMD_CREATE) {
-            qxl->guest_surfaces.cmds[id] = ext->cmd.data;
-            qxl->guest_surfaces.count++;
-            if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
-                qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+        WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
+            if (cmd->type == QXL_SURFACE_CMD_CREATE) {
+                qxl->guest_surfaces.cmds[id] = ext->cmd.data;
+                qxl->guest_surfaces.count++;
+                if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
+                    qxl->guest_surfaces.max = qxl->guest_surfaces.count;
+                }
+            }
+            if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
+                qxl->guest_surfaces.cmds[id] = 0;
+                qxl->guest_surfaces.count--;
+            }
         }
-        if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
-            qxl->guest_surfaces.cmds[id] = 0;
-            qxl->guest_surfaces.count--;
-        }
-        qemu_mutex_unlock(&qxl->track_lock);
         break;
     }
     case QXL_CMD_CURSOR:
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
     int i;
     int qxl_i;
 
-    qemu_mutex_lock(&qxl->ssd.lock);
+    QEMU_LOCK_GUARD(&qxl->ssd.lock);
     if (surface_id != 0 || !num_updated_rects ||
         !qxl->render_update_cookie_num) {
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
          * Don't bother copying or scheduling the bh since we will flip
          * the whole area anyway on completion of the update_area async call
          */
-        qemu_mutex_unlock(&qxl->ssd.lock);
         return;
     }
     qxl_i = qxl->num_dirty_rects;
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
     trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
                                                          qxl->num_dirty_rects);
     qemu_bh_schedule(qxl->update_area_bh);
-    qemu_mutex_unlock(&qxl->ssd.lock);
 }
 
 /* called from spice server thread context only */
@@ -XXX,XX +XXX,XX @@ static void ioport_write(void *opaque, hwaddr addr,
     case QXL_IO_MONITORS_CONFIG_ASYNC:
 async_common:
         async = QXL_ASYNC;
-        qemu_mutex_lock(&d->async_lock);
-        if (d->current_async != QXL_UNDEFINED_IO) {
-            qxl_set_guest_bug(d, "%d async started before last (%d) complete",
-                io_port, d->current_async);
-            qemu_mutex_unlock(&d->async_lock);
-            return;
+        WITH_QEMU_LOCK_GUARD(&d->async_lock) {
+            if (d->current_async != QXL_UNDEFINED_IO) {
+                qxl_set_guest_bug(d, "%d async started before last (%d) complete",
+                    io_port, d->current_async);
+                return;
+            }
+            d->current_async = orig_io_port;
         }
-        d->current_async = orig_io_port;
-        qemu_mutex_unlock(&d->async_lock);
         break;
     default:
         break;
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/vfio/platform.c
+++ b/hw/vfio/platform.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/vfio/vfio-platform.h"
 #include "migration/vmstate.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/module.h"
 #include "qemu/range.h"
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
     VFIOPlatformDevice *vdev = intp->vdev;
     bool delay_handling = false;
 
-    qemu_mutex_lock(&vdev->intp_mutex);
+    QEMU_LOCK_GUARD(&vdev->intp_mutex);
     if (intp->state == VFIO_IRQ_INACTIVE) {
         QLIST_FOREACH(tmp, &vdev->intp_list, next) {
             if (tmp->state == VFIO_IRQ_ACTIVE ||
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
         QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                              intp, pqnext);
         ret = event_notifier_test_and_clear(intp->interrupt);
-        qemu_mutex_unlock(&vdev->intp_mutex);
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                       vdev->mmap_timeout);
     }
-    qemu_mutex_unlock(&vdev->intp_mutex);
 }
 
 /**
diff --git a/migration/migration.c b/migration/migration.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -XXX,XX +XXX,XX @@ static void migrate_fd_cleanup_bh(void *opaque)
 
 void migrate_set_error(MigrationState *s, const Error *error)
 {
-    qemu_mutex_lock(&s->error_mutex);
+    QEMU_LOCK_GUARD(&s->error_mutex);
     if (!s->error) {
         s->error = error_copy(error);
     }
-    qemu_mutex_unlock(&s->error_mutex);
 }
 
 void migrate_fd_error(MigrationState *s, const Error *error)
diff --git a/migration/multifd.c b/migration/multifd.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -XXX,XX +XXX,XX @@ void multifd_recv_sync_main(void)
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDRecvParams *p = &multifd_recv_state->params[i];
 
-        qemu_mutex_lock(&p->mutex);
-        if (multifd_recv_state->packet_num < p->packet_num) {
-            multifd_recv_state->packet_num = p->packet_num;
+        WITH_QEMU_LOCK_GUARD(&p->mutex) {
+            if (multifd_recv_state->packet_num < p->packet_num) {
+                multifd_recv_state->packet_num = p->packet_num;
+            }
         }
-        qemu_mutex_unlock(&p->mutex);
         trace_multifd_recv_sync_main_signal(p->id);
         qemu_sem_post(&p->sem_sync);
     }
diff --git a/migration/ram.c b/migration/ram.c
index XXXXXXX..XXXXXXX 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -XXX,XX +XXX,XX @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
         return NULL;
     }
 
-    qemu_mutex_lock(&rs->src_page_req_mutex);
+    QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
     if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
         struct RAMSrcPageRequest *entry =
                 QSIMPLEQ_FIRST(&rs->src_page_requests);
@@ -XXX,XX +XXX,XX @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
             migration_consume_urgent_request();
         }
     }
-    qemu_mutex_unlock(&rs->src_page_req_mutex);
 
     return block;
 }
diff --git a/monitor/misc.c b/monitor/misc.c
index XXXXXXX..XXXXXXX 100644
--- a/monitor/misc.c
+++ b/monitor/misc.c
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     MonFdsetFd *mon_fdset_fd;
     AddfdInfo *fdinfo;
 
-    qemu_mutex_lock(&mon_fdsets_lock);
+    QEMU_LOCK_GUARD(&mon_fdsets_lock);
     if (has_fdset_id) {
         QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
             /* Break if match found or match impossible due to ordering by ID */
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
         if (fdset_id < 0) {
             error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
                        "a non-negative value");
-            qemu_mutex_unlock(&mon_fdsets_lock);
             return NULL;
         }
         /* Use specified fdset ID */
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
     fdinfo->fdset_id = mon_fdset->id;
     fdinfo->fd = mon_fdset_fd->fd;
 
-    qemu_mutex_unlock(&mon_fdsets_lock);
     return fdinfo;
 }
 
diff --git a/ui/spice-display.c b/ui/spice-display.c
index XXXXXXX..XXXXXXX 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/osdep.h"
 #include "ui/qemu-spice.h"
 #include "qemu/timer.h"
+#include "qemu/lockable.h"
 #include "qemu/main-loop.h"
 #include "qemu/option.h"
 #include "qemu/queue.h"
@@ -XXX,XX +XXX,XX @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
 {
     graphic_hw_update(ssd->dcl.con);
 
-    qemu_mutex_lock(&ssd->lock);
-    if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
-        qemu_spice_create_update(ssd);
-        ssd->notify++;
+    WITH_QEMU_LOCK_GUARD(&ssd->lock) {
+        if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
+            qemu_spice_create_update(ssd);
+            ssd->notify++;
+        }
     }
-    qemu_mutex_unlock(&ssd->lock);
 
     trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
     if (ssd->notify) {
@@ -XXX,XX +XXX,XX @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
     int ret;
 
-    qemu_mutex_lock(&ssd->lock);
+    QEMU_LOCK_GUARD(&ssd->lock);
     if (ssd->ptr_define) {
         *ext = ssd->ptr_define->ext;
         ssd->ptr_define = NULL;
@@ -XXX,XX +XXX,XX @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
     } else {
         ret = false;
     }
-    qemu_mutex_unlock(&ssd->lock);
     return ret;
 }
 
diff --git a/util/log.c b/util/log.c
index XXXXXXX..XXXXXXX 100644
--- a/util/log.c
+++ b/util/log.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/cutils.h"
 #include "trace/control.h"
 #include "qemu/thread.h"
+#include "qemu/lockable.h"
 
 static char *logfilename;
 static QemuMutex qemu_logfile_mutex;
@@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags)
     if (qemu_loglevel && (!is_daemonized() || logfilename)) {
         need_to_open_file = true;
     }
-    qemu_mutex_lock(&qemu_logfile_mutex);
+    QEMU_LOCK_GUARD(&qemu_logfile_mutex);
     if (qemu_logfile && !need_to_open_file) {
         logfile = qemu_logfile;
         atomic_rcu_set(&qemu_logfile, NULL);
@@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags)
         }
         atomic_rcu_set(&qemu_logfile, logfile);
     }
-    qemu_mutex_unlock(&qemu_logfile_mutex);
 }
 
 void qemu_log_needs_buffers(void)
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -XXX,XX +XXX,XX @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
     QEMUTimerList *timer_list = ts->timer_list;
     bool rearm;
 
-    qemu_mutex_lock(&timer_list->active_timers_lock);
-    if (ts->expire_time == -1 || ts->expire_time > expire_time) {
-        if (ts->expire_time != -1) {
-            timer_del_locked(timer_list, ts);
+    WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
+        if (ts->expire_time == -1 || ts->expire_time > expire_time) {
+            if (ts->expire_time != -1) {
+                timer_del_locked(timer_list, ts);
+            }
+            rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+        } else {
+            rearm = false;
         }
-        rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
-    } else {
-        rearm = false;
     }
-    qemu_mutex_unlock(&timer_list->active_timers_lock);
-
     if (rearm) {
         timerlist_rearm(timer_list);
     }
diff --git a/util/rcu.c b/util/rcu.c
index XXXXXXX..XXXXXXX 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/atomic.h"
 #include "qemu/thread.h"
 #include "qemu/main-loop.h"
+#include "qemu/lockable.h"
 #if defined(CONFIG_MALLOC_TRIM)
 #include <malloc.h>
 #endif
@@ -XXX,XX +XXX,XX @@ static void wait_for_readers(void)
 
 void synchronize_rcu(void)
 {
-    qemu_mutex_lock(&rcu_sync_lock);
+    QEMU_LOCK_GUARD(&rcu_sync_lock);
 
     /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
      * Pairs with smp_mb_placeholder() in rcu_read_lock().
      */
     smp_mb_global();
 
-    qemu_mutex_lock(&rcu_registry_lock);
+    QEMU_LOCK_GUARD(&rcu_registry_lock);
     if (!QLIST_EMPTY(&registry)) {
         /* In either case, the atomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
@@ -XXX,XX +XXX,XX @@ void synchronize_rcu(void)
 
         wait_for_readers();
     }
-
-    qemu_mutex_unlock(&rcu_registry_lock);
-    qemu_mutex_unlock(&rcu_sync_lock);
 }
 
 
diff --git a/util/thread-pool.c b/util/thread-pool.c
index XXXXXXX..XXXXXXX 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -XXX,XX +XXX,XX @@ static void thread_pool_cancel(BlockAIOCB *acb)
 
     trace_thread_pool_cancel(elem, elem->common.opaque);
 
-    qemu_mutex_lock(&pool->lock);
+    QEMU_LOCK_GUARD(&pool->lock);
     if (elem->state == THREAD_QUEUED &&
         /* No thread has yet started working on elem. we can try to "steal"
          * the item from the worker if we can get a signal from the
@@ -XXX,XX +XXX,XX @@ static void thread_pool_cancel(BlockAIOCB *acb)
         elem->ret = -ECANCELED;
     }
 
-    qemu_mutex_unlock(&pool->lock);
 }
 
 static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -XXX,XX +XXX,XX @@
 #include "standard-headers/linux/pci_regs.h"
 #include "qemu/event_notifier.h"
 #include "qemu/vfio-helpers.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 #define QEMU_VFIO_DEBUG 0
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
         .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
     };
     trace_qemu_vfio_dma_reset_temporary(s);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
-        qemu_mutex_unlock(&s->lock);
         return -errno;
     }
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_mutex_unlock(&s->lock);
     return 0;
 }
 
--
2.25.3
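
For readers who have not looked inside g_autoptr(): the reason QEMU_LOCK_GUARD
can drop every explicit unlock above, including the ones before early returns,
is the compiler's cleanup attribute, which runs a function whenever the
annotated variable leaves scope. The sketch below is a self-contained pthreads
analogue (all DEMO_* names are hypothetical, not the QEMU implementation, and
the cleanup attribute is a GCC/Clang extension):

    #include <pthread.h>
    #include <stdio.h>

    /* Runs automatically when the guard variable goes out of scope. */
    static void demo_unlock(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Lock now; schedule the unlock for end of scope. */
    #define DEMO_LOCK_GUARD(m)                           \
        pthread_mutex_t *demo_guard_                     \
            __attribute__((cleanup(demo_unlock))) = (m); \
        pthread_mutex_lock(demo_guard_)

    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int shared_counter;

    static int update(int fail)
    {
        DEMO_LOCK_GUARD(&demo_mutex);
        if (fail) {
            return -1;      /* early return: the mutex is still released */
        }
        shared_counter++;
        return 0;           /* normal return: released here too */
    }

    int main(void)
    {
        update(1);
        update(0);
        printf("shared_counter = %d\n", shared_counter);
        return 0;
    }

Because each guard declares a variable, two guards in the same scope need
distinct names; that is exactly what the __COUNTER__ fix in the previous
patch provides.
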
From: Simran Singhal <singhalsimran0@gmail.com>

Replace manual lock()/unlock() calls with lock guard macros
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).

Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Message-id: 20200402065035.GA15477@simran-Inspiron-5558
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/hyperv/hyperv.c     | 15 ++++++-------
 hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
 hw/rdma/rdma_rm.c      |  3 +--
 3 files changed, 33 insertions(+), 35 deletions(-)

diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
     EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -XXX,XX +XXX,XX @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
-    do {
-        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+        do {
+            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 
-        trace_rdma_poll_cq(ne, ibcq);
+            trace_rdma_poll_cq(ne, ibcq);
 
-        for (i = 0; i < ne; i++) {
-            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            if (unlikely(!bctx)) {
-                rdma_error_report("No matching ctx for req %"PRId64,
-                                  wc[i].wr_id);
-                continue;
-            }
+            for (i = 0; i < ne; i++) {
+                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                if (unlikely(!bctx)) {
+                    rdma_error_report("No matching ctx for req %"PRId64,
+                                      wc[i].wr_id);
+                    continue;
+                }
 
-            comp_handler(bctx->up_ctx, &wc[i]);
+                comp_handler(bctx->up_ctx, &wc[i]);
 
-            if (bctx->backend_qp) {
-                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
-            } else {
-                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
-            }
+                if (bctx->backend_qp) {
+                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+                } else {
+                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+                }
 
-            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
-            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            g_free(bctx);
-        }
-        total_ne += ne;
-    } while (ne > 0);
-    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                g_free(bctx);
+            }
+            total_ne += ne;
+        } while (ne > 0);
+        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -XXX,XX +XXX,XX @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
 
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
--
2.25.3
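
WITH_QEMU_LOCK_GUARD() is the variant used above when the lock should cover
only a block rather than the rest of the function. The trick is a for loop
that executes its body exactly once: the init clause takes the lock, the
third clause releases it after the single pass, and a cleanup attribute
catches early exits such as return. A standalone sketch with hypothetical
WITH_DEMO_* names (again relying on the GCC/Clang cleanup extension, not the
QEMU definitions):

    #include <pthread.h>
    #include <stdio.h>

    /* Only fires if the block was left early (var still non-NULL). */
    static void demo_cleanup(pthread_mutex_t **m)
    {
        if (*m) {
            pthread_mutex_unlock(*m);
        }
    }

    static pthread_mutex_t *demo_lock(pthread_mutex_t *m)
    {
        pthread_mutex_lock(m);
        return m;
    }

    /* One-shot loop: lock in the init clause, unlock after one pass. */
    #define WITH_DEMO_LOCK_GUARD(m)                                     \
        for (pthread_mutex_t *demo_var_                                 \
                 __attribute__((cleanup(demo_cleanup))) = demo_lock(m); \
             demo_var_;                                                 \
             pthread_mutex_unlock(demo_var_), demo_var_ = NULL)

    static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    static int value;

    int main(void)
    {
        WITH_DEMO_LOCK_GUARD(&mu) {
            value++;        /* lock held inside the block... */
        }                   /* ...and released right here */
        printf("value = %d\n", value);
        return 0;
    }

This shape is why the conversions above can keep statements like
"return ret;" inside the guarded block: leaving the block by any route
still releases the lock.
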