The following changes since commit 9cf289af47bcfae5c75de37d8e5d6fd23705322c:

  Merge tag 'qga-pull-request' of gitlab.com:marcandre.lureau/qemu into staging (2022-05-04 03:42:49 -0700)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to bef2e050d6a7feb865854c65570c496ac5a8cf53:

  util/event-loop-base: Introduce options to set the thread pool size (2022-05-04 17:02:19 +0100)

----------------------------------------------------------------
Pull request

Add new thread-pool-min/thread-pool-max parameters to control the thread pool
used for async I/O.

----------------------------------------------------------------

Nicolas Saenz Julienne (3):
  Introduce event-loop-base abstract class
  util/main-loop: Introduce the main loop into QOM
  util/event-loop-base: Introduce options to set the thread pool size

 qapi/qom.json                    |  43 ++++++++--
 meson.build                      |  26 +++---
 include/block/aio.h              |  10 +++
 include/block/thread-pool.h      |   3 +
 include/qemu/main-loop.h         |  10 +++
 include/sysemu/event-loop-base.h |  41 +++++++++
 include/sysemu/iothread.h        |   6 +-
 event-loop-base.c                | 140 +++++++++++++++++++++++++++++++
 iothread.c                       |  68 +++++----------
 util/aio-posix.c                 |   1 +
 util/async.c                     |  20 +++++
 util/main-loop.c                 |  65 ++++++++++++++
 util/thread-pool.c               |  55 +++++++++++-
 13 files changed, 419 insertions(+), 69 deletions(-)
 create mode 100644 include/sysemu/event-loop-base.h
 create mode 100644 event-loop-base.c

--
2.35.1

The following changes since commit 9af638cc1f665712522608c5d6b8c03d8fa67666:

  Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200504' into staging (2020-05-04 13:37:17 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 08b689aa6b521964b8275dd7a2564aefa5d68129:

  lockable: Replace locks with lock guard macros (2020-05-04 16:07:43 +0100)

----------------------------------------------------------------
Pull request

v2:
 * Fixed stray slirp submodule change [Peter]

Fixes for the lock guard macros, code conversions to the lock guard macros, and
support for selecting fuzzer targets with argv[0].

----------------------------------------------------------------

Alexander Bulekov (1):
  fuzz: select fuzz target using executable name

Daniel Brodsky (2):
  lockable: fix __COUNTER__ macro to be referenced properly
  lockable: replaced locks with lock guard macros where appropriate

Simran Singhal (1):
  lockable: Replace locks with lock guard macros

 include/qemu/lockable.h |  7 +++---
 include/qemu/rcu.h      |  2 +-
 block/iscsi.c           |  7 ++----
 block/nfs.c             | 51 +++++++++++++++++++----------------------
 cpus-common.c           | 14 ++++-------
 hw/display/qxl.c        | 43 ++++++++++++++++------------------
 hw/hyperv/hyperv.c      | 15 ++++++------
 hw/rdma/rdma_backend.c  | 50 ++++++++++++++++++++--------------------
 hw/rdma/rdma_rm.c       |  3 +--
 hw/vfio/platform.c      |  5 ++--
 migration/migration.c   |  3 +--
 migration/multifd.c     |  8 +++----
 migration/ram.c         |  3 +--
 monitor/misc.c          |  4 +---
 tests/qtest/fuzz/fuzz.c | 19 ++++++++-------
 ui/spice-display.c      | 14 +++++------
 util/log.c              |  4 ++--
 util/qemu-timer.c       | 17 +++++++-------
 util/rcu.c              |  8 +++----
 util/thread-pool.c      |  3 +--
 util/vfio-helpers.c     |  5 ++--
 21 files changed, 132 insertions(+), 153 deletions(-)

--
2.25.3
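As a quick illustration of the headline feature in the first pull request above, the new
sizing knobs apply to the event-loop objects (iothread and, later in the series, main-loop).
A hypothetical invocation, with the object id and sizes invented for illustration, would be:

    -object iothread,id=iothread0,thread-pool-min=8,thread-pool-max=8

With min equal to max the worker threads are created up front and kept alive, so a
latency-sensitive workload never pays the cost of spawning a worker thread on demand.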
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>

Introduce the 'event-loop-base' abstract class; it'll hold the
properties common to all event loops and provide the necessary hooks for
their creation and maintenance. Then have iothread inherit from it.

EventLoopBaseClass is defined as user creatable and provides a hook for
its children to attach themselves to the user creatable class 'complete'
function. It also provides an update_params() callback to propagate
property changes onto its children.

The new 'event-loop-base' class will live in the root directory. It is
built on its own using the 'link_whole' option (there are no direct
function dependencies between the class and its children, it all happens
through 'constructor' magic). And it also imposes new compilation
dependencies:

    qom <- event-loop-base <- blockdev (iothread.c)

And in subsequent patches:

    qom <- event-loop-base <- qemuutil (util/main-loop.c)

All this forced some amount of reordering in meson.build:

    - Moved qom build definition before qemuutil. Doing it the other way
      around (i.e. moving qemuutil after qom) isn't possible as a lot of
      core libraries that live in between the two depend on it.

    - Process the 'hw' subdir earlier, as it introduces files into the
      'qom' source set.

No functional changes intended.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-2-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/qom.json                    |  22 +++++--
 meson.build                      |  23 ++++---
 include/sysemu/event-loop-base.h |  36 +++++++++++
 include/sysemu/iothread.h        |   6 +-
 event-loop-base.c                | 104 +++++++++++++++++++++++++++++++
 iothread.c                       |  65 ++++++-------------
 6 files changed, 192 insertions(+), 64 deletions(-)
 create mode 100644 include/sysemu/event-loop-base.h
 create mode 100644 event-loop-base.c

From: Alexander Bulekov <alxndr@bu.edu>

The fuzzers are built into a binary (e.g. qemu-fuzz-i386). To select the
device to fuzz (the fuzz target), we usually use the --fuzz-target= argument.
This commit allows the fuzz target to be specified using the name of the
executable. If the executable name ends with -target-FUZZ_TARGET, then
we select the fuzz target based on this name, rather than the
--fuzz-target argument. This is useful for systems such as oss-fuzz
where we don't have control of the arguments passed to the fuzzer.

[Fixed incorrect indentation.
--Stefan]

Signed-off-by: Alexander Bulekov <alxndr@bu.edu>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Message-id: 20200421182230.6313-1-alxndr@bu.edu
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qtest/fuzz/fuzz.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
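To make the argv[0]-based selection concrete: on a system such as oss-fuzz, where no extra
arguments can be passed, the target can be chosen purely by how the binary is named. A
sketch of the idea (the target name below is made up for illustration):

    ln -s qemu-fuzz-i386 qemu-fuzz-i386-target-example-device-fuzz
    ./qemu-fuzz-i386-target-example-device-fuzz

which selects the same fuzz target as running qemu-fuzz-i386 --fuzz-target=example-device-fuzz.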
diff --git a/qapi/qom.json b/qapi/qom.json
22
diff --git a/tests/qtest/fuzz/fuzz.c b/tests/qtest/fuzz/fuzz.c
52
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
53
--- a/qapi/qom.json
24
--- a/tests/qtest/fuzz/fuzz.c
54
+++ b/qapi/qom.json
25
+++ b/tests/qtest/fuzz/fuzz.c
55
@@ -XXX,XX +XXX,XX @@
26
@@ -XXX,XX +XXX,XX @@ static void usage(char *path)
56
'*repeat': 'bool',
27
printf(" * %s : %s\n", tmp->target->name,
57
'*grab-toggle': 'GrabToggleKeys' } }
28
tmp->target->description);
58
29
}
59
+##
30
+ printf("Alternatively, add -target-FUZZ_TARGET to the executable name\n");
60
+# @EventLoopBaseProperties:
31
exit(0);
61
+#
62
+# Common properties for event loops
63
+#
64
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
65
+# 0 means that the engine will use its default.
66
+# (default: 0)
67
+#
68
+# Since: 7.1
69
+##
70
+{ 'struct': 'EventLoopBaseProperties',
71
+ 'data': { '*aio-max-batch': 'int' } }
72
+
73
##
74
# @IothreadProperties:
75
#
76
@@ -XXX,XX +XXX,XX @@
77
# algorithm detects it is spending too long polling without
78
# encountering events. 0 selects a default behaviour (default: 0)
79
#
80
-# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
81
-# 0 means that the engine will use its default
82
-# (default:0, since 6.1)
83
+# The @aio-max-batch option is available since 6.1.
84
#
85
# Since: 2.0
86
##
87
{ 'struct': 'IothreadProperties',
88
+ 'base': 'EventLoopBaseProperties',
89
'data': { '*poll-max-ns': 'int',
90
'*poll-grow': 'int',
91
- '*poll-shrink': 'int',
92
- '*aio-max-batch': 'int' } }
93
+ '*poll-shrink': 'int' } }
94
95
##
96
# @MemoryBackendProperties:
97
diff --git a/meson.build b/meson.build
98
index XXXXXXX..XXXXXXX 100644
99
--- a/meson.build
100
+++ b/meson.build
101
@@ -XXX,XX +XXX,XX @@ subdir('qom')
102
subdir('authz')
103
subdir('crypto')
104
subdir('ui')
105
+subdir('hw')
106
107
108
if enable_modules
109
@@ -XXX,XX +XXX,XX @@ if enable_modules
110
modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO')
111
endif
112
113
+qom_ss = qom_ss.apply(config_host, strict: false)
114
+libqom = static_library('qom', qom_ss.sources() + genh,
115
+ dependencies: [qom_ss.dependencies()],
116
+ name_suffix: 'fa')
117
+qom = declare_dependency(link_whole: libqom)
118
+
119
+event_loop_base = files('event-loop-base.c')
120
+event_loop_base = static_library('event-loop-base', sources: event_loop_base + genh,
121
+ build_by_default: true)
122
+event_loop_base = declare_dependency(link_whole: event_loop_base,
123
+ dependencies: [qom])
124
+
125
stub_ss = stub_ss.apply(config_all, strict: false)
126
127
util_ss.add_all(trace_ss)
128
@@ -XXX,XX +XXX,XX @@ subdir('monitor')
129
subdir('net')
130
subdir('replay')
131
subdir('semihosting')
132
-subdir('hw')
133
subdir('tcg')
134
subdir('fpu')
135
subdir('accel')
136
@@ -XXX,XX +XXX,XX @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms',
137
capture: true,
138
command: [undefsym, nm, '@INPUT@'])
139
140
-qom_ss = qom_ss.apply(config_host, strict: false)
141
-libqom = static_library('qom', qom_ss.sources() + genh,
142
- dependencies: [qom_ss.dependencies()],
143
- name_suffix: 'fa')
144
-
145
-qom = declare_dependency(link_whole: libqom)
146
-
147
authz_ss = authz_ss.apply(config_host, strict: false)
148
libauthz = static_library('authz', authz_ss.sources() + genh,
149
dependencies: [authz_ss.dependencies()],
150
@@ -XXX,XX +XXX,XX @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh,
151
build_by_default: false)
152
153
blockdev = declare_dependency(link_whole: [libblockdev],
154
- dependencies: [block])
155
+ dependencies: [block, event_loop_base])
156
157
qmp_ss = qmp_ss.apply(config_host, strict: false)
158
libqmp = static_library('qmp', qmp_ss.sources() + genh,
159
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
160
new file mode 100644
161
index XXXXXXX..XXXXXXX
162
--- /dev/null
163
+++ b/include/sysemu/event-loop-base.h
164
@@ -XXX,XX +XXX,XX @@
165
+/*
166
+ * QEMU event-loop backend
167
+ *
168
+ * Copyright (C) 2022 Red Hat Inc
169
+ *
170
+ * Authors:
171
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
172
+ *
173
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
174
+ * See the COPYING file in the top-level directory.
175
+ */
176
+#ifndef QEMU_EVENT_LOOP_BASE_H
177
+#define QEMU_EVENT_LOOP_BASE_H
178
+
179
+#include "qom/object.h"
180
+#include "block/aio.h"
181
+#include "qemu/typedefs.h"
182
+
183
+#define TYPE_EVENT_LOOP_BASE "event-loop-base"
184
+OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass,
185
+ EVENT_LOOP_BASE)
186
+
187
+struct EventLoopBaseClass {
188
+ ObjectClass parent_class;
189
+
190
+ void (*init)(EventLoopBase *base, Error **errp);
191
+ void (*update_params)(EventLoopBase *base, Error **errp);
192
+};
193
+
194
+struct EventLoopBase {
195
+ Object parent;
196
+
197
+ /* AioContext AIO engine parameters */
198
+ int64_t aio_max_batch;
199
+};
200
+#endif
201
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
202
index XXXXXXX..XXXXXXX 100644
203
--- a/include/sysemu/iothread.h
204
+++ b/include/sysemu/iothread.h
205
@@ -XXX,XX +XXX,XX @@
206
#include "block/aio.h"
207
#include "qemu/thread.h"
208
#include "qom/object.h"
209
+#include "sysemu/event-loop-base.h"
210
211
#define TYPE_IOTHREAD "iothread"
212
213
struct IOThread {
214
- Object parent_obj;
215
+ EventLoopBase parent_obj;
216
217
QemuThread thread;
218
AioContext *ctx;
219
@@ -XXX,XX +XXX,XX @@ struct IOThread {
220
int64_t poll_max_ns;
221
int64_t poll_grow;
222
int64_t poll_shrink;
223
-
224
- /* AioContext AIO engine parameters */
225
- int64_t aio_max_batch;
226
};
227
typedef struct IOThread IOThread;
228
229
diff --git a/event-loop-base.c b/event-loop-base.c
230
new file mode 100644
231
index XXXXXXX..XXXXXXX
232
--- /dev/null
233
+++ b/event-loop-base.c
234
@@ -XXX,XX +XXX,XX @@
235
+/*
236
+ * QEMU event-loop base
237
+ *
238
+ * Copyright (C) 2022 Red Hat Inc
239
+ *
240
+ * Authors:
241
+ * Stefan Hajnoczi <stefanha@redhat.com>
242
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
243
+ *
244
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
245
+ * See the COPYING file in the top-level directory.
246
+ */
247
+
248
+#include "qemu/osdep.h"
249
+#include "qom/object_interfaces.h"
250
+#include "qapi/error.h"
251
+#include "sysemu/event-loop-base.h"
252
+
253
+typedef struct {
254
+ const char *name;
255
+ ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
256
+} EventLoopBaseParamInfo;
257
+
258
+static EventLoopBaseParamInfo aio_max_batch_info = {
259
+ "aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
260
+};
261
+
262
+static void event_loop_base_get_param(Object *obj, Visitor *v,
263
+ const char *name, void *opaque, Error **errp)
264
+{
265
+ EventLoopBase *event_loop_base = EVENT_LOOP_BASE(obj);
266
+ EventLoopBaseParamInfo *info = opaque;
267
+ int64_t *field = (void *)event_loop_base + info->offset;
268
+
269
+ visit_type_int64(v, name, field, errp);
270
+}
271
+
272
+static void event_loop_base_set_param(Object *obj, Visitor *v,
273
+ const char *name, void *opaque, Error **errp)
274
+{
275
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(obj);
276
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
277
+ EventLoopBaseParamInfo *info = opaque;
278
+ int64_t *field = (void *)base + info->offset;
279
+ int64_t value;
280
+
281
+ if (!visit_type_int64(v, name, &value, errp)) {
282
+ return;
283
+ }
284
+
285
+ if (value < 0) {
286
+ error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
287
+ info->name, INT64_MAX);
288
+ return;
289
+ }
290
+
291
+ *field = value;
292
+
293
+ if (bc->update_params) {
294
+ bc->update_params(base, errp);
295
+ }
296
+
297
+ return;
298
+}
299
+
300
+static void event_loop_base_complete(UserCreatable *uc, Error **errp)
301
+{
302
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
303
+ EventLoopBase *base = EVENT_LOOP_BASE(uc);
304
+
305
+ if (bc->init) {
306
+ bc->init(base, errp);
307
+ }
308
+}
309
+
310
+static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
311
+{
312
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
313
+ ucc->complete = event_loop_base_complete;
314
+
315
+ object_class_property_add(klass, "aio-max-batch", "int",
316
+ event_loop_base_get_param,
317
+ event_loop_base_set_param,
318
+ NULL, &aio_max_batch_info);
319
+}
320
+
321
+static const TypeInfo event_loop_base_info = {
322
+ .name = TYPE_EVENT_LOOP_BASE,
323
+ .parent = TYPE_OBJECT,
324
+ .instance_size = sizeof(EventLoopBase),
325
+ .class_size = sizeof(EventLoopBaseClass),
326
+ .class_init = event_loop_base_class_init,
327
+ .abstract = true,
328
+ .interfaces = (InterfaceInfo[]) {
329
+ { TYPE_USER_CREATABLE },
330
+ { }
331
+ }
332
+};
333
+
334
+static void register_types(void)
335
+{
336
+ type_register_static(&event_loop_base_info);
337
+}
338
+type_init(register_types);
339
diff --git a/iothread.c b/iothread.c
340
index XXXXXXX..XXXXXXX 100644
341
--- a/iothread.c
342
+++ b/iothread.c
343
@@ -XXX,XX +XXX,XX @@
344
#include "qemu/module.h"
345
#include "block/aio.h"
346
#include "block/block.h"
347
+#include "sysemu/event-loop-base.h"
348
#include "sysemu/iothread.h"
349
#include "qapi/error.h"
350
#include "qapi/qapi-commands-misc.h"
351
@@ -XXX,XX +XXX,XX @@ static void iothread_init_gcontext(IOThread *iothread)
352
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
353
}
32
}
354
33
355
-static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
34
@@ -XXX,XX +XXX,XX @@ int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
356
+static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
35
module_call_init(MODULE_INIT_QOM);
357
{
36
module_call_init(MODULE_INIT_LIBQOS);
358
+ IOThread *iothread = IOTHREAD(base);
37
359
ERRP_GUARD();
38
- if (*argc <= 1) {
360
39
+ target_name = strstr(**argv, "-target-");
361
+ if (!iothread->ctx) {
40
+ if (target_name) { /* The binary name specifies the target */
362
+ return;
41
+ target_name += strlen("-target-");
363
+ }
42
+ } else if (*argc > 1) { /* The target is specified as an argument */
364
+
43
+ target_name = (*argv)[1];
365
aio_context_set_poll_params(iothread->ctx,
44
+ if (!strstr(target_name, "--fuzz-target=")) {
366
iothread->poll_max_ns,
45
+ usage(**argv);
367
iothread->poll_grow,
46
+ }
368
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
47
+ target_name += strlen("--fuzz-target=");
48
+ } else {
49
usage(**argv);
369
}
50
}
370
51
371
aio_context_set_aio_params(iothread->ctx,
52
/* Identify the fuzz target */
372
- iothread->aio_max_batch,
53
- target_name = (*argv)[1];
373
+ iothread->parent_obj.aio_max_batch,
54
- if (!strstr(target_name, "--fuzz-target=")) {
374
errp);
55
- usage(**argv);
375
}
376
377
-static void iothread_complete(UserCreatable *obj, Error **errp)
378
+
379
+static void iothread_init(EventLoopBase *base, Error **errp)
380
{
381
Error *local_error = NULL;
382
- IOThread *iothread = IOTHREAD(obj);
383
+ IOThread *iothread = IOTHREAD(base);
384
char *thread_name;
385
386
iothread->stopping = false;
387
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
388
*/
389
iothread_init_gcontext(iothread);
390
391
- iothread_set_aio_context_params(iothread, &local_error);
392
+ iothread_set_aio_context_params(base, &local_error);
393
if (local_error) {
394
error_propagate(errp, local_error);
395
aio_context_unref(iothread->ctx);
396
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
397
* to inherit.
398
*/
399
thread_name = g_strdup_printf("IO %s",
400
- object_get_canonical_path_component(OBJECT(obj)));
401
+ object_get_canonical_path_component(OBJECT(base)));
402
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
403
iothread, QEMU_THREAD_JOINABLE);
404
g_free(thread_name);
405
@@ -XXX,XX +XXX,XX @@ static IOThreadParamInfo poll_grow_info = {
406
static IOThreadParamInfo poll_shrink_info = {
407
"poll-shrink", offsetof(IOThread, poll_shrink),
408
};
409
-static IOThreadParamInfo aio_max_batch_info = {
410
- "aio-max-batch", offsetof(IOThread, aio_max_batch),
411
-};
412
413
static void iothread_get_param(Object *obj, Visitor *v,
414
const char *name, IOThreadParamInfo *info, Error **errp)
415
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
416
}
417
}
418
419
-static void iothread_get_aio_param(Object *obj, Visitor *v,
420
- const char *name, void *opaque, Error **errp)
421
-{
422
- IOThreadParamInfo *info = opaque;
423
-
424
- iothread_get_param(obj, v, name, info, errp);
425
-}
426
-
427
-static void iothread_set_aio_param(Object *obj, Visitor *v,
428
- const char *name, void *opaque, Error **errp)
429
-{
430
- IOThread *iothread = IOTHREAD(obj);
431
- IOThreadParamInfo *info = opaque;
432
-
433
- if (!iothread_set_param(obj, v, name, info, errp)) {
434
- return;
435
- }
56
- }
436
-
57
-
437
- if (iothread->ctx) {
58
- target_name += strlen("--fuzz-target=");
438
- aio_context_set_aio_params(iothread->ctx,
439
- iothread->aio_max_batch,
440
- errp);
441
- }
442
-}
443
-
59
-
444
static void iothread_class_init(ObjectClass *klass, void *class_data)
60
fuzz_target = fuzz_get_target(target_name);
445
{
61
if (!fuzz_target) {
446
- UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
62
usage(**argv);
447
- ucc->complete = iothread_complete;
448
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);
449
+
450
+ bc->init = iothread_init;
451
+ bc->update_params = iothread_set_aio_context_params;
452
453
object_class_property_add(klass, "poll-max-ns", "int",
454
iothread_get_poll_param,
455
@@ -XXX,XX +XXX,XX @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
456
iothread_get_poll_param,
457
iothread_set_poll_param,
458
NULL, &poll_shrink_info);
459
- object_class_property_add(klass, "aio-max-batch", "int",
460
- iothread_get_aio_param,
461
- iothread_set_aio_param,
462
- NULL, &aio_max_batch_info);
463
}
464
465
static const TypeInfo iothread_info = {
466
.name = TYPE_IOTHREAD,
467
- .parent = TYPE_OBJECT,
468
+ .parent = TYPE_EVENT_LOOP_BASE,
469
.class_init = iothread_class_init,
470
.instance_size = sizeof(IOThread),
471
.instance_init = iothread_instance_init,
472
.instance_finalize = iothread_instance_finalize,
473
- .interfaces = (InterfaceInfo[]) {
474
- {TYPE_USER_CREATABLE},
475
- {}
476
- },
477
};
478
479
static void iothread_register_types(void)
480
@@ -XXX,XX +XXX,XX @@ static int query_one_iothread(Object *object, void *opaque)
481
info->poll_max_ns = iothread->poll_max_ns;
482
info->poll_grow = iothread->poll_grow;
483
info->poll_shrink = iothread->poll_shrink;
484
- info->aio_max_batch = iothread->aio_max_batch;
485
+ info->aio_max_batch = iothread->parent_obj.aio_max_batch;
486
487
QAPI_LIST_APPEND(*tail, info);
488
return 0;
489
--
63
--
490
2.35.1
64
2.25.3
From: Daniel Brodsky <dnbrdsky@gmail.com>

- __COUNTER__ doesn't work with ## concat
- replaced ## with glue() macro so __COUNTER__ is evaluated

Fixes: 3284c3ddc4

Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
Message-id: 20200404042108.389635-2-dnbrdsky@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/lockable.h | 7 ++++---
 include/qemu/rcu.h      | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)
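The preprocessor behaviour behind this fix can be reproduced in a few standalone lines of C
(an illustrative sketch, not part of the patch): the ## operator pastes its operands without
macro-expanding them first, so an extra level of indirection, which is what QEMU's glue()
macro provides, is needed before __COUNTER__ is replaced by a number.

    #define CAT_DIRECT(a, b)  a##b              /* pastes b literally, no expansion */
    #define CAT_EXPAND_(a, b) a##b
    #define CAT_EXPAND(a, b)  CAT_EXPAND_(a, b) /* expands b first, like glue() */

    void example(void)
    {
        int CAT_EXPAND(guard, __COUNTER__) = 0; /* declares e.g. guard0 */
        int CAT_EXPAND(guard, __COUNTER__) = 0; /* declares e.g. guard1 */
        /*
         * With CAT_DIRECT both lines would declare the same identifier,
         * guard__COUNTER__, and the second definition would not compile,
         * which is exactly the collision the lock guard macros hit when
         * used twice in one scope.
         */
    }

The G_GNUC_UNUSED annotation added to QEMU_LOCK_GUARD below also silences the
unused-variable warning for the guard variable itself.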
diff --git a/include/qemu/lockable.h b/include/qemu/lockable.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/qemu/lockable.h
19
+++ b/include/qemu/lockable.h
20
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
21
* }
22
*/
23
#define WITH_QEMU_LOCK_GUARD(x) \
24
- WITH_QEMU_LOCK_GUARD_((x), qemu_lockable_auto##__COUNTER__)
25
+ WITH_QEMU_LOCK_GUARD_((x), glue(qemu_lockable_auto, __COUNTER__))
26
27
/**
28
* QEMU_LOCK_GUARD - Lock an object until the end of the scope
29
@@ -XXX,XX +XXX,XX @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QemuLockable, qemu_lockable_auto_unlock)
30
* return; <-- mutex is automatically unlocked
31
* }
32
*/
33
-#define QEMU_LOCK_GUARD(x) \
34
- g_autoptr(QemuLockable) qemu_lockable_auto##__COUNTER__ = \
35
+#define QEMU_LOCK_GUARD(x) \
36
+ g_autoptr(QemuLockable) \
37
+ glue(qemu_lockable_auto, __COUNTER__) G_GNUC_UNUSED = \
38
qemu_lockable_auto_lock(QEMU_MAKE_LOCKABLE((x)))
39
40
#endif
41
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/include/qemu/rcu.h
44
+++ b/include/qemu/rcu.h
45
@@ -XXX,XX +XXX,XX @@ static inline void rcu_read_auto_unlock(RCUReadAuto *r)
46
G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
47
48
#define WITH_RCU_READ_LOCK_GUARD() \
49
- WITH_RCU_READ_LOCK_GUARD_(_rcu_read_auto##__COUNTER__)
50
+ WITH_RCU_READ_LOCK_GUARD_(glue(_rcu_read_auto, __COUNTER__))
51
52
#define WITH_RCU_READ_LOCK_GUARD_(var) \
53
for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock(); \
54
--
55
2.25.3
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>

The thread pool regulates itself: when idle, it kills threads until
empty; when in demand, it creates new threads until full. This behaviour
doesn't play well with latency sensitive workloads where the price of
creating a new thread is too high. For example, when paired with qemu's
'-mlock', or using safety features like SafeStack, creating a new thread
has been measured to take multiple milliseconds.

In order to mitigate this, let's introduce a new 'EventLoopBase'
property to set the thread pool size. The threads will be created during
the pool's initialization or upon updating the property's value, remain
available during its lifetime regardless of demand, and are destroyed
upon freeing it. A properly characterized workload will then be able to
configure the pool to avoid any latency spikes.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-4-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/qom.json                    | 10 +++++-
 include/block/aio.h              | 10 ++++++
 include/block/thread-pool.h      |  3 ++
 include/sysemu/event-loop-base.h |  4 +++
 event-loop-base.c                | 23 +++++++++++++
 iothread.c                       |  3 ++
 util/aio-posix.c                 |  1 +
 util/async.c                     | 20 ++++++++++++
 util/main-loop.c                 |  9 ++++++
 util/thread-pool.c               | 55 +++++++++++++++++++++++++++++---
 10 files changed, 133 insertions(+), 5 deletions(-)

From: Daniel Brodsky <dnbrdsky@gmail.com>

- ran regexp "qemu_mutex_lock\(.*\).*\n.*if" to find targets
- replaced result with QEMU_LOCK_GUARD if all unlocks were at the end of the function
- replaced result with WITH_QEMU_LOCK_GUARD if the unlock was not at the end

Signed-off-by: Daniel Brodsky <dnbrdsky@gmail.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Message-id: 20200404042108.389635-3-dnbrdsky@gmail.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/iscsi.c         |  7 ++----
 block/nfs.c           | 51 ++++++++++++++++++++-----------------------
 cpus-common.c         | 14 +++++-------
 hw/display/qxl.c      | 43 +++++++++++++++++-------------------
 hw/vfio/platform.c    |  5 ++---
 migration/migration.c |  3 +--
 migration/multifd.c   |  8 +++----
 migration/ram.c       |  3 +--
 monitor/misc.c        |  4 +---
 ui/spice-display.c    | 14 ++++++------
 util/log.c            |  4 ++--
 util/qemu-timer.c     | 17 +++++++--------
 util/rcu.c            |  8 +++----
 util/thread-pool.c    |  3 +--
 util/vfio-helpers.c   |  5 ++---
 15 files changed, 83 insertions(+), 106 deletions(-)
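Before reading the conversions that follow, it may help to recall how the two guard macros
behave. A minimal usage sketch (the mutex, counter and functions are invented for
illustration; the includes are the real QEMU headers):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "qemu/lockable.h"

    static QemuMutex lock;      /* assume qemu_mutex_init(&lock) ran at startup */
    static int counter;

    static int read_counter(void)
    {
        QEMU_LOCK_GUARD(&lock);          /* held until the function returns */
        if (counter < 0) {
            return -1;                   /* early return, unlock happens automatically */
        }
        return counter;
    }

    static void reset_counter(void)
    {
        WITH_QEMU_LOCK_GUARD(&lock) {    /* held only for the duration of this block */
            counter = 0;
        }
        /* the mutex is no longer held here */
    }

Because the guards release the mutex on every exit path, the conversions below can drop the
explicit qemu_mutex_unlock() calls, including the ones on early-return and error paths.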
diff --git a/qapi/qom.json b/qapi/qom.json
29
diff --git a/block/iscsi.c b/block/iscsi.c
36
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
37
--- a/qapi/qom.json
31
--- a/block/iscsi.c
38
+++ b/qapi/qom.json
32
+++ b/block/iscsi.c
33
@@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque)
34
{
35
IscsiLun *iscsilun = opaque;
36
37
- qemu_mutex_lock(&iscsilun->mutex);
38
+ QEMU_LOCK_GUARD(&iscsilun->mutex);
39
if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) {
40
error_report("iSCSI: NOP timeout. Reconnecting...");
41
iscsilun->request_timed_out = true;
42
} else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) {
43
error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages.");
44
- goto out;
45
+ return;
46
}
47
48
timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL);
49
iscsi_set_events(iscsilun);
50
-
51
-out:
52
- qemu_mutex_unlock(&iscsilun->mutex);
53
}
54
55
static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp)
56
diff --git a/block/nfs.c b/block/nfs.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/block/nfs.c
59
+++ b/block/nfs.c
60
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
61
nfs_co_init_task(bs, &task);
62
task.iov = iov;
63
64
- qemu_mutex_lock(&client->mutex);
65
- if (nfs_pread_async(client->context, client->fh,
66
- offset, bytes, nfs_co_generic_cb, &task) != 0) {
67
- qemu_mutex_unlock(&client->mutex);
68
- return -ENOMEM;
69
- }
70
+ WITH_QEMU_LOCK_GUARD(&client->mutex) {
71
+ if (nfs_pread_async(client->context, client->fh,
72
+ offset, bytes, nfs_co_generic_cb, &task) != 0) {
73
+ return -ENOMEM;
74
+ }
75
76
- nfs_set_events(client);
77
- qemu_mutex_unlock(&client->mutex);
78
+ nfs_set_events(client);
79
+ }
80
while (!task.complete) {
81
qemu_coroutine_yield();
82
}
83
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
84
buf = iov->iov[0].iov_base;
85
}
86
87
- qemu_mutex_lock(&client->mutex);
88
- if (nfs_pwrite_async(client->context, client->fh,
89
- offset, bytes, buf,
90
- nfs_co_generic_cb, &task) != 0) {
91
- qemu_mutex_unlock(&client->mutex);
92
- if (my_buffer) {
93
- g_free(buf);
94
+ WITH_QEMU_LOCK_GUARD(&client->mutex) {
95
+ if (nfs_pwrite_async(client->context, client->fh,
96
+ offset, bytes, buf,
97
+ nfs_co_generic_cb, &task) != 0) {
98
+ if (my_buffer) {
99
+ g_free(buf);
100
+ }
101
+ return -ENOMEM;
102
}
103
- return -ENOMEM;
104
- }
105
106
- nfs_set_events(client);
107
- qemu_mutex_unlock(&client->mutex);
108
+ nfs_set_events(client);
109
+ }
110
while (!task.complete) {
111
qemu_coroutine_yield();
112
}
113
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
114
115
nfs_co_init_task(bs, &task);
116
117
- qemu_mutex_lock(&client->mutex);
118
- if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
119
- &task) != 0) {
120
- qemu_mutex_unlock(&client->mutex);
121
- return -ENOMEM;
122
- }
123
+ WITH_QEMU_LOCK_GUARD(&client->mutex) {
124
+ if (nfs_fsync_async(client->context, client->fh, nfs_co_generic_cb,
125
+ &task) != 0) {
126
+ return -ENOMEM;
127
+ }
128
129
- nfs_set_events(client);
130
- qemu_mutex_unlock(&client->mutex);
131
+ nfs_set_events(client);
132
+ }
133
while (!task.complete) {
134
qemu_coroutine_yield();
135
}
136
diff --git a/cpus-common.c b/cpus-common.c
137
index XXXXXXX..XXXXXXX 100644
138
--- a/cpus-common.c
139
+++ b/cpus-common.c
39
@@ -XXX,XX +XXX,XX @@
140
@@ -XXX,XX +XXX,XX @@
40
# 0 means that the engine will use its default.
141
#include "exec/cpu-common.h"
41
# (default: 0)
142
#include "hw/core/cpu.h"
42
#
143
#include "sysemu/cpus.h"
43
+# @thread-pool-min: minimum number of threads reserved in the thread pool
144
+#include "qemu/lockable.h"
44
+# (default:0)
145
45
+#
146
static QemuMutex qemu_cpu_list_lock;
46
+# @thread-pool-max: maximum number of threads the thread pool can contain
147
static QemuCond exclusive_cond;
47
+# (default:64)
148
@@ -XXX,XX +XXX,XX @@ static int cpu_get_free_index(void)
48
+#
149
49
# Since: 7.1
150
void cpu_list_add(CPUState *cpu)
50
##
151
{
51
{ 'struct': 'EventLoopBaseProperties',
152
- qemu_mutex_lock(&qemu_cpu_list_lock);
52
- 'data': { '*aio-max-batch': 'int' } }
153
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
53
+ 'data': { '*aio-max-batch': 'int',
154
if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
54
+ '*thread-pool-min': 'int',
155
cpu->cpu_index = cpu_get_free_index();
55
+ '*thread-pool-max': 'int' } }
156
assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
56
157
@@ -XXX,XX +XXX,XX @@ void cpu_list_add(CPUState *cpu)
57
##
158
assert(!cpu_index_auto_assigned);
58
# @IothreadProperties:
159
}
59
diff --git a/include/block/aio.h b/include/block/aio.h
160
QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
60
index XXXXXXX..XXXXXXX 100644
161
- qemu_mutex_unlock(&qemu_cpu_list_lock);
61
--- a/include/block/aio.h
162
}
62
+++ b/include/block/aio.h
163
63
@@ -XXX,XX +XXX,XX @@ struct AioContext {
164
void cpu_list_remove(CPUState *cpu)
64
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
165
{
65
QEMUBH *co_schedule_bh;
166
- qemu_mutex_lock(&qemu_cpu_list_lock);
66
167
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
67
+ int thread_pool_min;
168
if (!QTAILQ_IN_USE(cpu, node)) {
68
+ int thread_pool_max;
169
/* there is nothing to undo since cpu_exec_init() hasn't been called */
69
/* Thread pool for performing work and receiving completion callbacks.
170
- qemu_mutex_unlock(&qemu_cpu_list_lock);
70
* Has its own locking.
171
return;
172
}
173
174
@@ -XXX,XX +XXX,XX @@ void cpu_list_remove(CPUState *cpu)
175
176
QTAILQ_REMOVE_RCU(&cpus, cpu, node);
177
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
178
- qemu_mutex_unlock(&qemu_cpu_list_lock);
179
}
180
181
struct qemu_work_item {
182
@@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu)
183
* see cpu->running == true, and it will kick the CPU.
71
*/
184
*/
72
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
185
if (unlikely(atomic_read(&pending_cpus))) {
73
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
186
- qemu_mutex_lock(&qemu_cpu_list_lock);
74
Error **errp);
187
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
75
188
if (!cpu->has_waiter) {
76
+/**
189
/* Not counted in pending_cpus, let the exclusive item
77
+ * aio_context_set_thread_pool_params:
190
* run. Since we have the lock, just set cpu->running to true
78
+ * @ctx: the aio context
191
@@ -XXX,XX +XXX,XX @@ void cpu_exec_start(CPUState *cpu)
79
+ * @min: min number of threads to have readily available in the thread pool
192
* waiter at cpu_exec_end.
80
+ * @max: max number of threads the thread pool can contain
193
*/
81
+ */
194
}
82
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
195
- qemu_mutex_unlock(&qemu_cpu_list_lock);
83
+ int64_t max, Error **errp);
196
}
84
#endif
197
}
85
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
198
86
index XXXXXXX..XXXXXXX 100644
199
@@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu)
87
--- a/include/block/thread-pool.h
200
* next cpu_exec_start.
88
+++ b/include/block/thread-pool.h
201
*/
202
if (unlikely(atomic_read(&pending_cpus))) {
203
- qemu_mutex_lock(&qemu_cpu_list_lock);
204
+ QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
205
if (cpu->has_waiter) {
206
cpu->has_waiter = false;
207
atomic_set(&pending_cpus, pending_cpus - 1);
208
@@ -XXX,XX +XXX,XX @@ void cpu_exec_end(CPUState *cpu)
209
qemu_cond_signal(&exclusive_cond);
210
}
211
}
212
- qemu_mutex_unlock(&qemu_cpu_list_lock);
213
}
214
}
215
216
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
217
index XXXXXXX..XXXXXXX 100644
218
--- a/hw/display/qxl.c
219
+++ b/hw/display/qxl.c
220
@@ -XXX,XX +XXX,XX @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
221
cmd->u.surface_create.stride);
222
return 1;
223
}
224
- qemu_mutex_lock(&qxl->track_lock);
225
- if (cmd->type == QXL_SURFACE_CMD_CREATE) {
226
- qxl->guest_surfaces.cmds[id] = ext->cmd.data;
227
- qxl->guest_surfaces.count++;
228
- if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
229
- qxl->guest_surfaces.max = qxl->guest_surfaces.count;
230
+ WITH_QEMU_LOCK_GUARD(&qxl->track_lock) {
231
+ if (cmd->type == QXL_SURFACE_CMD_CREATE) {
232
+ qxl->guest_surfaces.cmds[id] = ext->cmd.data;
233
+ qxl->guest_surfaces.count++;
234
+ if (qxl->guest_surfaces.max < qxl->guest_surfaces.count) {
235
+ qxl->guest_surfaces.max = qxl->guest_surfaces.count;
236
+ }
237
+ }
238
+ if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
239
+ qxl->guest_surfaces.cmds[id] = 0;
240
+ qxl->guest_surfaces.count--;
241
+ }
242
}
243
- if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
244
- qxl->guest_surfaces.cmds[id] = 0;
245
- qxl->guest_surfaces.count--;
246
- }
247
- qemu_mutex_unlock(&qxl->track_lock);
248
break;
249
}
250
case QXL_CMD_CURSOR:
251
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
252
int i;
253
int qxl_i;
254
255
- qemu_mutex_lock(&qxl->ssd.lock);
256
+ QEMU_LOCK_GUARD(&qxl->ssd.lock);
257
if (surface_id != 0 || !num_updated_rects ||
258
!qxl->render_update_cookie_num) {
259
- qemu_mutex_unlock(&qxl->ssd.lock);
260
return;
261
}
262
trace_qxl_interface_update_area_complete(qxl->id, surface_id, dirty->left,
263
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
264
* Don't bother copying or scheduling the bh since we will flip
265
* the whole area anyway on completion of the update_area async call
266
*/
267
- qemu_mutex_unlock(&qxl->ssd.lock);
268
return;
269
}
270
qxl_i = qxl->num_dirty_rects;
271
@@ -XXX,XX +XXX,XX @@ static void interface_update_area_complete(QXLInstance *sin,
272
trace_qxl_interface_update_area_complete_schedule_bh(qxl->id,
273
qxl->num_dirty_rects);
274
qemu_bh_schedule(qxl->update_area_bh);
275
- qemu_mutex_unlock(&qxl->ssd.lock);
276
}
277
278
/* called from spice server thread context only */
279
@@ -XXX,XX +XXX,XX @@ static void ioport_write(void *opaque, hwaddr addr,
280
case QXL_IO_MONITORS_CONFIG_ASYNC:
281
async_common:
282
async = QXL_ASYNC;
283
- qemu_mutex_lock(&d->async_lock);
284
- if (d->current_async != QXL_UNDEFINED_IO) {
285
- qxl_set_guest_bug(d, "%d async started before last (%d) complete",
286
- io_port, d->current_async);
287
- qemu_mutex_unlock(&d->async_lock);
288
- return;
289
+ WITH_QEMU_LOCK_GUARD(&d->async_lock) {
290
+ if (d->current_async != QXL_UNDEFINED_IO) {
291
+ qxl_set_guest_bug(d, "%d async started before last (%d) complete",
292
+ io_port, d->current_async);
293
+ return;
294
+ }
295
+ d->current_async = orig_io_port;
296
}
297
- d->current_async = orig_io_port;
298
- qemu_mutex_unlock(&d->async_lock);
299
break;
300
default:
301
break;
302
diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c
303
index XXXXXXX..XXXXXXX 100644
304
--- a/hw/vfio/platform.c
305
+++ b/hw/vfio/platform.c
89
@@ -XXX,XX +XXX,XX @@
306
@@ -XXX,XX +XXX,XX @@
90
307
#include "hw/vfio/vfio-platform.h"
91
#include "block/block.h"
308
#include "migration/vmstate.h"
92
309
#include "qemu/error-report.h"
93
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
310
+#include "qemu/lockable.h"
94
+
311
#include "qemu/main-loop.h"
95
typedef int ThreadPoolFunc(void *opaque);
312
#include "qemu/module.h"
96
313
#include "qemu/range.h"
97
typedef struct ThreadPool ThreadPool;
314
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
98
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
315
VFIOPlatformDevice *vdev = intp->vdev;
99
int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
316
bool delay_handling = false;
100
ThreadPoolFunc *func, void *arg);
317
101
void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
318
- qemu_mutex_lock(&vdev->intp_mutex);
102
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
319
+ QEMU_LOCK_GUARD(&vdev->intp_mutex);
103
320
if (intp->state == VFIO_IRQ_INACTIVE) {
104
#endif
321
QLIST_FOREACH(tmp, &vdev->intp_list, next) {
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
322
if (tmp->state == VFIO_IRQ_ACTIVE ||
106
index XXXXXXX..XXXXXXX 100644
323
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
107
--- a/include/sysemu/event-loop-base.h
324
QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
108
+++ b/include/sysemu/event-loop-base.h
325
intp, pqnext);
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBase {
326
ret = event_notifier_test_and_clear(intp->interrupt);
110
327
- qemu_mutex_unlock(&vdev->intp_mutex);
111
/* AioContext AIO engine parameters */
328
return;
112
int64_t aio_max_batch;
329
}
113
+
330
114
+ /* AioContext thread pool parameters */
331
@@ -XXX,XX +XXX,XX @@ static void vfio_intp_interrupt(VFIOINTp *intp)
115
+ int64_t thread_pool_min;
332
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
116
+ int64_t thread_pool_max;
333
vdev->mmap_timeout);
117
};
334
}
118
#endif
335
- qemu_mutex_unlock(&vdev->intp_mutex);
119
diff --git a/event-loop-base.c b/event-loop-base.c
336
}
120
index XXXXXXX..XXXXXXX 100644
337
121
--- a/event-loop-base.c
338
/**
122
+++ b/event-loop-base.c
339
diff --git a/migration/migration.c b/migration/migration.c
340
index XXXXXXX..XXXXXXX 100644
341
--- a/migration/migration.c
342
+++ b/migration/migration.c
343
@@ -XXX,XX +XXX,XX @@ static void migrate_fd_cleanup_bh(void *opaque)
344
345
void migrate_set_error(MigrationState *s, const Error *error)
346
{
347
- qemu_mutex_lock(&s->error_mutex);
348
+ QEMU_LOCK_GUARD(&s->error_mutex);
349
if (!s->error) {
350
s->error = error_copy(error);
351
}
352
- qemu_mutex_unlock(&s->error_mutex);
353
}
354
355
void migrate_fd_error(MigrationState *s, const Error *error)
356
diff --git a/migration/multifd.c b/migration/multifd.c
357
index XXXXXXX..XXXXXXX 100644
358
--- a/migration/multifd.c
359
+++ b/migration/multifd.c
360
@@ -XXX,XX +XXX,XX @@ void multifd_recv_sync_main(void)
361
for (i = 0; i < migrate_multifd_channels(); i++) {
362
MultiFDRecvParams *p = &multifd_recv_state->params[i];
363
364
- qemu_mutex_lock(&p->mutex);
365
- if (multifd_recv_state->packet_num < p->packet_num) {
366
- multifd_recv_state->packet_num = p->packet_num;
367
+ WITH_QEMU_LOCK_GUARD(&p->mutex) {
368
+ if (multifd_recv_state->packet_num < p->packet_num) {
369
+ multifd_recv_state->packet_num = p->packet_num;
370
+ }
371
}
372
- qemu_mutex_unlock(&p->mutex);
373
trace_multifd_recv_sync_main_signal(p->id);
374
qemu_sem_post(&p->sem_sync);
375
}
376
diff --git a/migration/ram.c b/migration/ram.c
377
index XXXXXXX..XXXXXXX 100644
378
--- a/migration/ram.c
379
+++ b/migration/ram.c
380
@@ -XXX,XX +XXX,XX @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
381
return NULL;
382
}
383
384
- qemu_mutex_lock(&rs->src_page_req_mutex);
385
+ QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
386
if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
387
struct RAMSrcPageRequest *entry =
388
QSIMPLEQ_FIRST(&rs->src_page_requests);
389
@@ -XXX,XX +XXX,XX @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
390
migration_consume_urgent_request();
391
}
392
}
393
- qemu_mutex_unlock(&rs->src_page_req_mutex);
394
395
return block;
396
}
397
diff --git a/monitor/misc.c b/monitor/misc.c
398
index XXXXXXX..XXXXXXX 100644
399
--- a/monitor/misc.c
400
+++ b/monitor/misc.c
401
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
402
MonFdsetFd *mon_fdset_fd;
403
AddfdInfo *fdinfo;
404
405
- qemu_mutex_lock(&mon_fdsets_lock);
406
+ QEMU_LOCK_GUARD(&mon_fdsets_lock);
407
if (has_fdset_id) {
408
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
409
/* Break if match found or match impossible due to ordering by ID */
410
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
411
if (fdset_id < 0) {
412
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
413
"a non-negative value");
414
- qemu_mutex_unlock(&mon_fdsets_lock);
415
return NULL;
416
}
417
/* Use specified fdset ID */
418
@@ -XXX,XX +XXX,XX @@ AddfdInfo *monitor_fdset_add_fd(int fd, bool has_fdset_id, int64_t fdset_id,
419
fdinfo->fdset_id = mon_fdset->id;
420
fdinfo->fd = mon_fdset_fd->fd;
421
422
- qemu_mutex_unlock(&mon_fdsets_lock);
423
return fdinfo;
424
}
425
426
diff --git a/ui/spice-display.c b/ui/spice-display.c
427
index XXXXXXX..XXXXXXX 100644
428
--- a/ui/spice-display.c
429
+++ b/ui/spice-display.c
123
@@ -XXX,XX +XXX,XX @@
430
@@ -XXX,XX +XXX,XX @@
124
#include "qemu/osdep.h"
431
#include "qemu/osdep.h"
125
#include "qom/object_interfaces.h"
432
#include "ui/qemu-spice.h"
126
#include "qapi/error.h"
433
#include "qemu/timer.h"
127
+#include "block/thread-pool.h"
434
+#include "qemu/lockable.h"
128
#include "sysemu/event-loop-base.h"
435
#include "qemu/main-loop.h"
129
436
#include "qemu/option.h"
130
typedef struct {
437
#include "qemu/queue.h"
131
@@ -XXX,XX +XXX,XX @@ typedef struct {
438
@@ -XXX,XX +XXX,XX @@ void qemu_spice_display_refresh(SimpleSpiceDisplay *ssd)
132
ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
439
{
133
} EventLoopBaseParamInfo;
440
graphic_hw_update(ssd->dcl.con);
134
441
135
+static void event_loop_base_instance_init(Object *obj)
442
- qemu_mutex_lock(&ssd->lock);
136
+{
443
- if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
137
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
444
- qemu_spice_create_update(ssd);
138
+
445
- ssd->notify++;
139
+ base->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
446
+ WITH_QEMU_LOCK_GUARD(&ssd->lock) {
140
+}
447
+ if (QTAILQ_EMPTY(&ssd->updates) && ssd->ds) {
141
+
448
+ qemu_spice_create_update(ssd);
142
static EventLoopBaseParamInfo aio_max_batch_info = {
449
+ ssd->notify++;
143
"aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
450
+ }
144
};
451
}
145
+static EventLoopBaseParamInfo thread_pool_min_info = {
452
- qemu_mutex_unlock(&ssd->lock);
146
+ "thread-pool-min", offsetof(EventLoopBase, thread_pool_min),
453
147
+};
454
trace_qemu_spice_display_refresh(ssd->qxl.id, ssd->notify);
148
+static EventLoopBaseParamInfo thread_pool_max_info = {
455
if (ssd->notify) {
149
+ "thread-pool-max", offsetof(EventLoopBase, thread_pool_max),
456
@@ -XXX,XX +XXX,XX @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
150
+};
457
SimpleSpiceDisplay *ssd = container_of(sin, SimpleSpiceDisplay, qxl);
151
458
int ret;
152
static void event_loop_base_get_param(Object *obj, Visitor *v,
459
153
const char *name, void *opaque, Error **errp)
460
- qemu_mutex_lock(&ssd->lock);
154
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
461
+ QEMU_LOCK_GUARD(&ssd->lock);
155
event_loop_base_get_param,
462
if (ssd->ptr_define) {
156
event_loop_base_set_param,
463
*ext = ssd->ptr_define->ext;
157
NULL, &aio_max_batch_info);
464
ssd->ptr_define = NULL;
158
+ object_class_property_add(klass, "thread-pool-min", "int",
465
@@ -XXX,XX +XXX,XX @@ static int interface_get_cursor_command(QXLInstance *sin, QXLCommandExt *ext)
159
+ event_loop_base_get_param,
466
} else {
160
+ event_loop_base_set_param,
467
ret = false;
161
+ NULL, &thread_pool_min_info);
468
}
162
+ object_class_property_add(klass, "thread-pool-max", "int",
469
- qemu_mutex_unlock(&ssd->lock);
163
+ event_loop_base_get_param,
470
return ret;
164
+ event_loop_base_set_param,
471
}
165
+ NULL, &thread_pool_max_info);
472
166
}
473
diff --git a/util/log.c b/util/log.c
167
474
index XXXXXXX..XXXXXXX 100644
168
static const TypeInfo event_loop_base_info = {
475
--- a/util/log.c
169
.name = TYPE_EVENT_LOOP_BASE,
476
+++ b/util/log.c
170
.parent = TYPE_OBJECT,
171
.instance_size = sizeof(EventLoopBase),
172
+ .instance_init = event_loop_base_instance_init,
173
.class_size = sizeof(EventLoopBaseClass),
174
.class_init = event_loop_base_class_init,
175
.abstract = true,
176
diff --git a/iothread.c b/iothread.c
177
index XXXXXXX..XXXXXXX 100644
178
--- a/iothread.c
179
+++ b/iothread.c
180
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
181
aio_context_set_aio_params(iothread->ctx,
182
iothread->parent_obj.aio_max_batch,
183
errp);
184
+
185
+ aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
186
+ base->thread_pool_max, errp);
187
}
188
189
190
diff --git a/util/aio-posix.c b/util/aio-posix.c
191
index XXXXXXX..XXXXXXX 100644
192
--- a/util/aio-posix.c
193
+++ b/util/aio-posix.c
194
@@ -XXX,XX +XXX,XX @@
477
@@ -XXX,XX +XXX,XX @@
195
478
#include "qemu/cutils.h"
196
#include "qemu/osdep.h"
479
#include "trace/control.h"
197
#include "block/block.h"
480
#include "qemu/thread.h"
198
+#include "block/thread-pool.h"
481
+#include "qemu/lockable.h"
482
483
static char *logfilename;
484
static QemuMutex qemu_logfile_mutex;
485
@@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags)
486
if (qemu_loglevel && (!is_daemonized() || logfilename)) {
487
need_to_open_file = true;
488
}
489
- qemu_mutex_lock(&qemu_logfile_mutex);
490
+ QEMU_LOCK_GUARD(&qemu_logfile_mutex);
491
if (qemu_logfile && !need_to_open_file) {
492
logfile = qemu_logfile;
493
atomic_rcu_set(&qemu_logfile, NULL);
494
@@ -XXX,XX +XXX,XX @@ void qemu_set_log(int log_flags)
495
}
496
atomic_rcu_set(&qemu_logfile, logfile);
497
}
498
- qemu_mutex_unlock(&qemu_logfile_mutex);
499
}
500
501
void qemu_log_needs_buffers(void)
502
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
503
index XXXXXXX..XXXXXXX 100644
504
--- a/util/qemu-timer.c
505
+++ b/util/qemu-timer.c
506
@@ -XXX,XX +XXX,XX @@ void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
507
QEMUTimerList *timer_list = ts->timer_list;
508
bool rearm;
509
510
- qemu_mutex_lock(&timer_list->active_timers_lock);
511
- if (ts->expire_time == -1 || ts->expire_time > expire_time) {
512
- if (ts->expire_time != -1) {
513
- timer_del_locked(timer_list, ts);
514
+ WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
515
+ if (ts->expire_time == -1 || ts->expire_time > expire_time) {
516
+ if (ts->expire_time != -1) {
517
+ timer_del_locked(timer_list, ts);
518
+ }
519
+ rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
520
+ } else {
521
+ rearm = false;
522
}
523
- rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
524
- } else {
525
- rearm = false;
526
}
527
- qemu_mutex_unlock(&timer_list->active_timers_lock);
528
-
529
if (rearm) {
530
timerlist_rearm(timer_list);
531
}
532
diff --git a/util/rcu.c b/util/rcu.c
533
index XXXXXXX..XXXXXXX 100644
534
--- a/util/rcu.c
535
+++ b/util/rcu.c
536
@@ -XXX,XX +XXX,XX @@
537
#include "qemu/atomic.h"
538
#include "qemu/thread.h"
199
#include "qemu/main-loop.h"
539
#include "qemu/main-loop.h"
200
#include "qemu/rcu.h"
540
+#include "qemu/lockable.h"
201
#include "qemu/rcu_queue.h"
541
#if defined(CONFIG_MALLOC_TRIM)
202
diff --git a/util/async.c b/util/async.c
542
#include <malloc.h>
203
index XXXXXXX..XXXXXXX 100644
543
#endif
204
--- a/util/async.c
544
@@ -XXX,XX +XXX,XX @@ static void wait_for_readers(void)
205
+++ b/util/async.c
545
206
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
546
void synchronize_rcu(void)
207
208
ctx->aio_max_batch = 0;
209
210
+ ctx->thread_pool_min = 0;
211
+ ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
212
+
213
return ctx;
214
fail:
215
g_source_destroy(&ctx->source);
216
@@ -XXX,XX +XXX,XX @@ void qemu_set_current_aio_context(AioContext *ctx)
217
assert(!get_my_aiocontext());
218
set_my_aiocontext(ctx);
219
}
220
+
221
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
222
+ int64_t max, Error **errp)
223
+{
224
+
225
+ if (min > max || !max || min > INT_MAX || max > INT_MAX) {
226
+ error_setg(errp, "bad thread-pool-min/thread-pool-max values");
227
+ return;
228
+ }
229
+
230
+ ctx->thread_pool_min = min;
231
+ ctx->thread_pool_max = max;
232
+
233
+ if (ctx->thread_pool) {
234
+ thread_pool_update_params(ctx->thread_pool, ctx);
235
+ }
236
+}
237
diff --git a/util/main-loop.c b/util/main-loop.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/util/main-loop.c
240
+++ b/util/main-loop.c
241
@@ -XXX,XX +XXX,XX @@
242
#include "sysemu/replay.h"
243
#include "qemu/main-loop.h"
244
#include "block/aio.h"
245
+#include "block/thread-pool.h"
246
#include "qemu/error-report.h"
247
#include "qemu/queue.h"
248
#include "qemu/compiler.h"
249
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
250
251
static void main_loop_update_params(EventLoopBase *base, Error **errp)
252
{
547
{
253
+ ERRP_GUARD();
548
- qemu_mutex_lock(&rcu_sync_lock);
254
+
549
+ QEMU_LOCK_GUARD(&rcu_sync_lock);
255
if (!qemu_aio_context) {
550
256
error_setg(errp, "qemu aio context not ready");
551
/* Write RCU-protected pointers before reading p_rcu_reader->ctr.
257
return;
552
* Pairs with smp_mb_placeholder() in rcu_read_lock().
258
}
553
*/
259
554
smp_mb_global();
260
aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
555
261
+ if (*errp) {
556
- qemu_mutex_lock(&rcu_registry_lock);
262
+ return;
557
+ QEMU_LOCK_GUARD(&rcu_registry_lock);
263
+ }
558
if (!QLIST_EMPTY(&registry)) {
264
+
559
/* In either case, the atomic_mb_set below blocks stores that free
265
+ aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
560
* old RCU-protected pointers.
266
+ base->thread_pool_max, errp);
561
@@ -XXX,XX +XXX,XX @@ void synchronize_rcu(void)
267
}
562
268
563
wait_for_readers();
269
MainLoop *mloop;
564
}
565
-
566
- qemu_mutex_unlock(&rcu_registry_lock);
567
- qemu_mutex_unlock(&rcu_sync_lock);
568
}
569
570
270
diff --git a/util/thread-pool.c b/util/thread-pool.c
571
diff --git a/util/thread-pool.c b/util/thread-pool.c
271
index XXXXXXX..XXXXXXX 100644
572
index XXXXXXX..XXXXXXX 100644
272
--- a/util/thread-pool.c
573
--- a/util/thread-pool.c
273
+++ b/util/thread-pool.c
574
+++ b/util/thread-pool.c
274
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
575
@@ -XXX,XX +XXX,XX @@ static void thread_pool_cancel(BlockAIOCB *acb)
275
QemuMutex lock;
576
276
QemuCond worker_stopped;
577
trace_thread_pool_cancel(elem, elem->common.opaque);
277
QemuSemaphore sem;
578
278
- int max_threads;
579
- qemu_mutex_lock(&pool->lock);
279
QEMUBH *new_thread_bh;
580
+ QEMU_LOCK_GUARD(&pool->lock);
280
581
if (elem->state == THREAD_QUEUED &&
281
/* The following variables are only accessed from one AioContext. */
582
/* No thread has yet started working on elem. we can try to "steal"
282
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
583
* the item from the worker if we can get a signal from the
283
int new_threads; /* backlog of threads we need to create */
584
@@ -XXX,XX +XXX,XX @@ static void thread_pool_cancel(BlockAIOCB *acb)
284
int pending_threads; /* threads created but not running yet */
585
elem->ret = -ECANCELED;
285
bool stopping;
586
}
286
+ int min_threads;
587
287
+    int max_threads;
 };
 
+static inline bool back_to_sleep(ThreadPool *pool, int ret)
+{
+    /*
+     * The semaphore timed out; we should exit the loop except when:
+     *  - There is work to do, we raced with the signal.
+     *  - The max threads threshold just changed, we raced with the signal.
+     *  - The thread pool forces a minimum number of readily available threads.
+     */
+    if (ret == -1 && (!QTAILQ_EMPTY(&pool->request_list) ||
+        pool->cur_threads > pool->max_threads ||
+        pool->cur_threads <= pool->min_threads)) {
+        return true;
+    }
+
+    return false;
+}
+
 static void *worker_thread(void *opaque)
 {
     ThreadPool *pool = opaque;
@@ -XXX,XX +XXX,XX @@ static void *worker_thread(void *opaque)
            ret = qemu_sem_timedwait(&pool->sem, 10000);
            qemu_mutex_lock(&pool->lock);
            pool->idle_threads--;
-        } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
-        if (ret == -1 || pool->stopping) {
+        } while (back_to_sleep(pool, ret));
+        if (ret == -1 || pool->stopping ||
+            pool->cur_threads > pool->max_threads) {
             break;
         }
 
@@ -XXX,XX +XXX,XX @@ void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
     thread_pool_submit_aio(pool, func, arg, NULL, NULL);
 }
 
+void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
+{
+    qemu_mutex_lock(&pool->lock);
+
+    pool->min_threads = ctx->thread_pool_min;
+    pool->max_threads = ctx->thread_pool_max;
+
+    /*
+     * We have to either:
+     *  - Increase the number of available threads until over the min_threads
+     *    threshold.
+     *  - Decrease the number of available threads until under the max_threads
+     *    threshold.
+     *  - Do nothing. The current number of threads falls in between the min
+     *    and max thresholds. We'll let the pool manage itself.
+     */
+    for (int i = pool->cur_threads; i < pool->min_threads; i++) {
+        spawn_thread(pool);
+    }
+
+    for (int i = pool->cur_threads; i > pool->max_threads; i--) {
+        qemu_sem_post(&pool->sem);
+    }
+
+    qemu_mutex_unlock(&pool->lock);
+}
+
 static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
 {
     if (!ctx) {
@@ -XXX,XX +XXX,XX @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
     qemu_mutex_init(&pool->lock);
     qemu_cond_init(&pool->worker_stopped);
     qemu_sem_init(&pool->sem, 0);
-    pool->max_threads = 64;
     pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
 
     QLIST_INIT(&pool->head);
     QTAILQ_INIT(&pool->request_list);
+
+    thread_pool_update_params(pool, ctx);
 }
 
 ThreadPool *thread_pool_new(AioContext *ctx)
-- 
2.35.1

-    qemu_mutex_unlock(&pool->lock);
 }
 
 static AioContext *thread_pool_get_aio_context(BlockAIOCB *acb)
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -XXX,XX +XXX,XX @@
 #include "standard-headers/linux/pci_regs.h"
 #include "qemu/event_notifier.h"
 #include "qemu/vfio-helpers.h"
+#include "qemu/lockable.h"
 #include "trace.h"
 
 #define QEMU_VFIO_DEBUG 0
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s)
         .size = QEMU_VFIO_IOVA_MAX - s->high_water_mark,
     };
     trace_qemu_vfio_dma_reset_temporary(s);
-    qemu_mutex_lock(&s->lock);
+    QEMU_LOCK_GUARD(&s->lock);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
-        qemu_mutex_unlock(&s->lock);
         return -errno;
     }
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_mutex_unlock(&s->lock);
     return 0;
 }
 
-- 
2.25.3
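For readers skimming the util/thread-pool.c hunk above, the new policy boils down to: keep at least thread-pool-min workers alive, never keep more than thread-pool-max, and otherwise let the pool grow and shrink on demand. The standalone C sketch below models only that bookkeeping; ThreadPoolModel, pending_wakeups and the helper functions are illustrative stand-ins, not the actual QEMU structures or APIs.

/* Standalone sketch of the min/max resize policy; not the QEMU implementation. */
#include <stdio.h>

typedef struct {
    int cur_threads;      /* workers currently alive */
    int min_threads;      /* lower bound enforced by the pool */
    int max_threads;      /* upper bound enforced by the pool */
    int pending_wakeups;  /* stands in for qemu_sem_post(&pool->sem) */
} ThreadPoolModel;

/* Stand-in for spawn_thread(): just account for a new worker. */
static void spawn_thread(ThreadPoolModel *pool)
{
    pool->cur_threads++;
}

/* Mirrors the shape of thread_pool_update_params(): grow up to min_threads,
 * then post one wakeup per excess worker so idle workers can notice
 * cur_threads > max_threads and exit on their own. */
static void update_params(ThreadPoolModel *pool, int min, int max)
{
    pool->min_threads = min;
    pool->max_threads = max;

    for (int i = pool->cur_threads; i < pool->min_threads; i++) {
        spawn_thread(pool);
    }
    for (int i = pool->cur_threads; i > pool->max_threads; i--) {
        pool->pending_wakeups++;
    }
}

int main(void)
{
    ThreadPoolModel pool = { .cur_threads = 4 };

    update_params(&pool, 8, 16);   /* grows the pool to 8 workers */
    printf("threads=%d wakeups=%d\n", pool.cur_threads, pool.pending_wakeups);

    update_params(&pool, 1, 2);    /* posts 6 wakeups so workers can retire */
    printf("threads=%d wakeups=%d\n", pool.cur_threads, pool.pending_wakeups);
    return 0;
}

Posting one semaphore wakeup per excess worker is what lets the shrink path stay lock-friendly: the pool never forcibly kills a thread, it only nudges idle workers so they re-check the limits and exit themselves.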
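The lock-guard conversions in this pull request, the util/vfio-helpers.c hunk above and the hw/hyperv and hw/rdma patches below, all follow the same shape: take the mutex for the rest of the scope and let early exits drop it automatically, instead of pairing every return path with an explicit unlock or a goto-to-unlock label. Here is a small standalone sketch of that idea built on the compiler cleanup attribute, which is roughly the mechanism behind the real macros; MUTEX_GUARD, unlock_cleanup and register_handler are illustrative names, not QEMU's QEMU_LOCK_GUARD implementation.

/* Standalone sketch of scope-based locking; requires GCC or Clang. */
#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
    pthread_mutex_unlock(*m);
}

/* Lock now, unlock automatically when the guard variable leaves scope. */
#define MUTEX_GUARD(m) \
    __attribute__((cleanup(unlock_cleanup), unused)) \
    pthread_mutex_t *mutex_guard_ = (pthread_mutex_lock(m), (m))

static pthread_mutex_t handlers_mutex = PTHREAD_MUTEX_INITIALIZER;
static int registered_id;

static int register_handler(int id)
{
    MUTEX_GUARD(&handlers_mutex);

    if (registered_id != 0) {
        return -1;      /* early return: the guard unlocks for us */
    }
    registered_id = id;
    return 0;           /* normal return: the guard unlocks here too */
}

int main(void)
{
    printf("%d\n", register_handler(42));   /* prints 0  */
    printf("%d\n", register_handler(7));    /* prints -1 */
    return 0;
}

With a guard in place, error paths can simply return instead of jumping to an unlock label, which is exactly the shape of the hw/hyperv/hyperv.c hunks in the patch that follows.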
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>

'event-loop-base' provides basic property handling for all 'AioContext'
based event loops. So let's define a new 'MainLoopClass' that inherits
from it. This will permit tweaking the main loop's properties through
qapi as well as through the command line using the '-object' keyword[1].
Only one instance of 'MainLoopClass' may be created at any time.

'EventLoopBaseClass' learns a new callback, 'can_be_deleted()', so as to
mark 'MainLoop' as non-deletable.

[1] For example:
      -object main-loop,id=main-loop,aio-max-batch=<value>

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-3-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/qom.json                    | 13 ++++++++
 meson.build                      |  3 +-
 include/qemu/main-loop.h         | 10 ++++++
 include/sysemu/event-loop-base.h |  1 +
 event-loop-base.c                | 13 ++++++++
 util/main-loop.c                 | 56 ++++++++++++++++++++++++++++++++
 6 files changed, 95 insertions(+), 1 deletion(-)

diff --git a/qapi/qom.json b/qapi/qom.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -XXX,XX +XXX,XX @@
     '*poll-grow': 'int',
     '*poll-shrink': 'int' } }
 
+##
+# @MainLoopProperties:
+#
+# Properties for the main-loop object.
+#
+# Since: 7.1
+##
+{ 'struct': 'MainLoopProperties',
+  'base': 'EventLoopBaseProperties',
+  'data': {} }
+
 ##
 # @MemoryBackendProperties:
 #
@@ -XXX,XX +XXX,XX @@
     { 'name': 'input-linux',
       'if': 'CONFIG_LINUX' },
     'iothread',
+    'main-loop',
     { 'name': 'memory-backend-epc',
       'if': 'CONFIG_LINUX' },
     'memory-backend-file',
@@ -XXX,XX +XXX,XX @@
   'input-linux': { 'type': 'InputLinuxProperties',
                    'if': 'CONFIG_LINUX' },
   'iothread': 'IothreadProperties',
+  'main-loop': 'MainLoopProperties',
   'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties',
                           'if': 'CONFIG_LINUX' },
   'memory-backend-file': 'MemoryBackendFileProperties',
diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ libqemuutil = static_library('qemuutil',
                              sources: util_ss.sources() + stub_ss.sources() + genh,
                              dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, pixman])
 qemuutil = declare_dependency(link_with: libqemuutil,
-                              sources: genh + version_res)
+                              sources: genh + version_res,
+                              dependencies: [event_loop_base])
 
 if have_system or have_user
   decodetree = generator(find_program('scripts/decodetree.py'),
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -XXX,XX +XXX,XX @@
 #define QEMU_MAIN_LOOP_H
 
 #include "block/aio.h"
+#include "qom/object.h"
+#include "sysemu/event-loop-base.h"
 
 #define SIG_IPI SIGUSR1
 
+#define TYPE_MAIN_LOOP "main-loop"
+OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP)
+
+struct MainLoop {
+    EventLoopBase parent_obj;
+};
+typedef struct MainLoop MainLoop;
+
 /**
  * qemu_init_main_loop: Set up the process so that it can run the main loop.
  *
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/event-loop-base.h
+++ b/include/sysemu/event-loop-base.h
@@ -XXX,XX +XXX,XX @@ struct EventLoopBaseClass {
 
     void (*init)(EventLoopBase *base, Error **errp);
     void (*update_params)(EventLoopBase *base, Error **errp);
+    bool (*can_be_deleted)(EventLoopBase *base);
 };
 
 struct EventLoopBase {
diff --git a/event-loop-base.c b/event-loop-base.c
index XXXXXXX..XXXXXXX 100644
--- a/event-loop-base.c
+++ b/event-loop-base.c
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_complete(UserCreatable *uc, Error **errp)
     }
 }
 
+static bool event_loop_base_can_be_deleted(UserCreatable *uc)
+{
+    EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
+    EventLoopBase *backend = EVENT_LOOP_BASE(uc);
+
+    if (bc->can_be_deleted) {
+        return bc->can_be_deleted(backend);
+    }
+
+    return true;
+}
+
 static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
 {
     UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
     ucc->complete = event_loop_base_complete;
+    ucc->can_be_deleted = event_loop_base_can_be_deleted;
 
     object_class_property_add(klass, "aio-max-batch", "int",
                               event_loop_base_get_param,
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/error-report.h"
 #include "qemu/queue.h"
 #include "qemu/compiler.h"
+#include "qom/object.h"
 
 #ifndef _WIN32
 #include <sys/wait.h>
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
     return 0;
 }
 
+static void main_loop_update_params(EventLoopBase *base, Error **errp)
+{
+    if (!qemu_aio_context) {
+        error_setg(errp, "qemu aio context not ready");
+        return;
+    }
+
+    aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
+}
+
+MainLoop *mloop;
+
+static void main_loop_init(EventLoopBase *base, Error **errp)
+{
+    MainLoop *m = MAIN_LOOP(base);
+
+    if (mloop) {
+        error_setg(errp, "only one main-loop instance allowed");
+        return;
+    }
+
+    main_loop_update_params(base, errp);
+
+    mloop = m;
+    return;
+}
+
+static bool main_loop_can_be_deleted(EventLoopBase *base)
+{
+    return false;
+}
+
+static void main_loop_class_init(ObjectClass *oc, void *class_data)
+{
+    EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc);
+
+    bc->init = main_loop_init;
+    bc->update_params = main_loop_update_params;
+    bc->can_be_deleted = main_loop_can_be_deleted;
+}
+
+static const TypeInfo main_loop_info = {
+    .name = TYPE_MAIN_LOOP,
+    .parent = TYPE_EVENT_LOOP_BASE,
+    .class_init = main_loop_class_init,
+    .instance_size = sizeof(MainLoop),
+};
+
+static void main_loop_register_types(void)
+{
+    type_register_static(&main_loop_info);
+}
+
+type_init(main_loop_register_types)
+
 static int max_priority;
 
 #ifndef _WIN32
-- 
2.35.1

From: Simran Singhal <singhalsimran0@gmail.com>

Replace manual lock()/unlock() calls with lock guard macros
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).

Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Message-id: 20200402065035.GA15477@simran-Inspiron-5558
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/hyperv/hyperv.c     | 15 ++++++-------
 hw/rdma/rdma_backend.c | 50 +++++++++++++++++++++---------------------
 hw/rdma/rdma_rm.c      |  3 +--
 3 files changed, 33 insertions(+), 35 deletions(-)

diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/hyperv/hyperv.c
+++ b/hw/hyperv/hyperv.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
     EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -XXX,XX +XXX,XX @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
-    do {
-        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+        do {
+            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 
-        trace_rdma_poll_cq(ne, ibcq);
+            trace_rdma_poll_cq(ne, ibcq);
 
-        for (i = 0; i < ne; i++) {
-            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            if (unlikely(!bctx)) {
-                rdma_error_report("No matching ctx for req %"PRId64,
-                                  wc[i].wr_id);
-                continue;
-            }
+            for (i = 0; i < ne; i++) {
+                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                if (unlikely(!bctx)) {
+                    rdma_error_report("No matching ctx for req %"PRId64,
+                                      wc[i].wr_id);
+                    continue;
+                }
 
-            comp_handler(bctx->up_ctx, &wc[i]);
+                comp_handler(bctx->up_ctx, &wc[i]);
 
-            if (bctx->backend_qp) {
-                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
-            } else {
-                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
-            }
+                if (bctx->backend_qp) {
+                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+                } else {
+                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+                }
 
-            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
-            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            g_free(bctx);
-        }
-        total_ne += ne;
-    } while (ne > 0);
-    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                g_free(bctx);
+            }
+            total_ne += ne;
+        } while (ne > 0);
+        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -XXX,XX +XXX,XX @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
 
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
-- 
2.25.3
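One detail worth calling out in the QOM patch above: event_loop_base_can_be_deleted() treats the new class callback as optional and defaults to "deletable" when a subclass does not provide it, while main-loop overrides it to always refuse deletion. The standalone C sketch below shows just that optional-hook pattern with plain function pointers; LoopClass and the helper names are illustrative, not QEMU APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative class struct: an optional per-subclass hook. */
typedef struct {
    bool (*can_be_deleted)(void *obj);  /* NULL means "no opinion" */
} LoopClass;

/* Mirrors event_loop_base_can_be_deleted(): default to true
 * unless the subclass installed a hook saying otherwise. */
static bool loop_can_be_deleted(const LoopClass *cls, void *obj)
{
    if (cls->can_be_deleted) {
        return cls->can_be_deleted(obj);
    }
    return true;
}

/* Mirrors main_loop_can_be_deleted(): the main loop is never deletable. */
static bool main_loop_hook(void *obj)
{
    return false;
}

int main(void)
{
    LoopClass iothread_class = { .can_be_deleted = NULL };
    LoopClass main_loop_class = { .can_be_deleted = main_loop_hook };

    printf("iothread deletable:  %d\n", loop_can_be_deleted(&iothread_class, NULL));
    printf("main-loop deletable: %d\n", loop_can_be_deleted(&main_loop_class, NULL));
    return 0;
}

Defaulting to true keeps subclasses that do not care, such as iothread, deletable without any changes; only main-loop has to opt out.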