The following changes since commit 9cf289af47bcfae5c75de37d8e5d6fd23705322c:

  Merge tag 'qga-pull-request' of gitlab.com:marcandre.lureau/qemu into staging (2022-05-04 03:42:49 -0700)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to bef2e050d6a7feb865854c65570c496ac5a8cf53:

  util/event-loop-base: Introduce options to set the thread pool size (2022-05-04 17:02:19 +0100)

----------------------------------------------------------------
Pull request

Add new thread-pool-min/thread-pool-max parameters to control the thread pool
used for async I/O.

----------------------------------------------------------------

Nicolas Saenz Julienne (3):
      Introduce event-loop-base abstract class
      util/main-loop: Introduce the main loop into QOM
      util/event-loop-base: Introduce options to set the thread pool size

 qapi/qom.json                    |  43 ++++++++--
 meson.build                      |  26 +++---
 include/block/aio.h              |  10 +++
 include/block/thread-pool.h      |   3 +
 include/qemu/main-loop.h         |  10 +++
 include/sysemu/event-loop-base.h |  41 +++++++++
 include/sysemu/iothread.h        |   6 +-
 event-loop-base.c                | 140 +++++++++++++++++++++++++++++++
 iothread.c                       |  68 +++++----------
 util/aio-posix.c                 |   1 +
 util/async.c                     |  20 +++++
 util/main-loop.c                 |  65 ++++++++++++++
 util/thread-pool.c               |  55 +++++++++++-
 13 files changed, 419 insertions(+), 69 deletions(-)
 create mode 100644 include/sysemu/event-loop-base.h
 create mode 100644 event-loop-base.c

--
2.35.1

The following changes since commit 887cba855bb6ff4775256f7968409281350b568c:

  configure: Fix cross-building for RISCV host (v5) (2023-07-11 17:56:09 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 75dcb4d790bbe5327169fd72b185960ca58e2fa6:

  virtio-blk: fix host notifier issues during dataplane start/stop (2023-07-12 15:20:32 -0400)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefan Hajnoczi (1):
      virtio-blk: fix host notifier issues during dataplane start/stop

 hw/block/dataplane/virtio-blk.c | 67 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 29 deletions(-)

--
2.40.1
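The tag above can be fetched locally in the usual way, for example:

    git fetch https://gitlab.com/stefanha/qemu.git tags/block-pull-request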
The main loop thread can consume 100% CPU when using --device
virtio-blk-pci,iothread=<iothread>. ppoll() constantly returns but
reading virtqueue host notifiers fails with EAGAIN. The file descriptors
are stale and remain registered with the AioContext because of bugs in
the virtio-blk dataplane start/stop code.

The problem is that the dataplane start/stop code involves drain
operations, which call virtio_blk_drained_begin() and
virtio_blk_drained_end() at points where the host notifier is not
operational:
- In virtio_blk_data_plane_start(), blk_set_aio_context() drains after
  vblk->dataplane_started has been set to true but the host notifier has
  not been attached yet.
- In virtio_blk_data_plane_stop(), blk_drain() and blk_set_aio_context()
  drain after the host notifier has already been detached but with
  vblk->dataplane_started still set to true.

I would like to simplify ->ioeventfd_start/stop() to avoid interactions
with drain entirely, but couldn't find a way to do that. Instead, this
patch accepts the fragile nature of the code and reorders it so that
vblk->dataplane_started is false during drain operations. This way the
virtio_blk_drained_begin() and virtio_blk_drained_end() calls don't
touch the host notifier. The result is that
virtio_blk_data_plane_start() and virtio_blk_data_plane_stop() have
complete control over the host notifier and stale file descriptors are
no longer left in the AioContext.

This patch fixes the 100% CPU consumption in the main loop thread and
correctly moves host notifier processing to the IOThread.

Fixes: 1665d9326fd2 ("virtio-blk: implement BlockDevOps->drained_begin()")
Reported-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Lukas Doktor <ldoktor@redhat.com>
Message-id: 20230704151527.193586-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/dataplane/virtio-blk.c | 67 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 29 deletions(-)

From: Nicolas Saenz Julienne <nsaenzju@redhat.com>

Introduce the 'event-loop-base' abstract class; it will hold the
properties common to all event loops and provide the necessary hooks for
their creation and maintenance. Then have iothread inherit from it.

EventLoopBaseClass is defined as user creatable and provides a hook for
its children to attach themselves to the user creatable class 'complete'
function. It also provides an update_params() callback to propagate
property changes onto its children.

The new 'event-loop-base' class will live in the root directory. It is
built on its own using the 'link_whole' option (there are no direct
function dependencies between the class and its children; it all happens
through 'constructor' magic). It also imposes new compilation
dependencies:

    qom <- event-loop-base <- blockdev (iothread.c)

And in subsequent patches:

    qom <- event-loop-base <- qemuutil (util/main-loop.c)

All this forced some amount of reordering in meson.build:

 - Moved the qom build definition before qemuutil. Doing it the other way
   around (i.e. moving qemuutil after qom) isn't possible, as a lot of
   core libraries that live in between the two depend on it.

 - Process the 'hw' subdir earlier, as it introduces files into the
   'qom' source set.

No functional changes intended.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-2-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/qom.json                    |  22 +++++--
 meson.build                      |  23 ++++---
 include/sysemu/event-loop-base.h |  36 +++++++++++
 include/sysemu/iothread.h        |   6 +-
 event-loop-base.c                | 104 +++++++++++++++++++++++++++++++
 iothread.c                       |  65 ++++++-------------
 6 files changed, 192 insertions(+), 64 deletions(-)
 create mode 100644 include/sysemu/event-loop-base.h
 create mode 100644 event-loop-base.c

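A note on the property plumbing used below: event-loop-base.c drives its QOM
properties through a small table of {name, offset} descriptors and a shared
getter/setter pair that reaches the field via offsetof(). Outside of QEMU, the
same table-driven pattern looks roughly like this minimal, self-contained
sketch (the struct and field names here are illustrative, not QEMU's):

    #include <stdio.h>
    #include <stddef.h>
    #include <inttypes.h>

    typedef struct {
        int64_t aio_max_batch;   /* illustrative stand-in for an EventLoopBase field */
        int64_t thread_pool_min;
    } Params;

    typedef struct {
        const char *name;
        ptrdiff_t offset;        /* field's byte offset inside Params */
    } ParamInfo;

    static const ParamInfo aio_max_batch_info = {
        "aio-max-batch", offsetof(Params, aio_max_batch)
    };

    /* One setter serves every descriptor: locate the field through its offset. */
    static void set_param(Params *p, const ParamInfo *info, int64_t value)
    {
        int64_t *field = (int64_t *)((char *)p + info->offset);
        *field = value;
    }

    int main(void)
    {
        Params p = {0};
        set_param(&p, &aio_max_batch_info, 8);
        printf("%s = %" PRId64 "\n", aio_max_batch_info.name, p.aio_max_batch);
        return 0;
    }

The descriptor table keeps each property down to a couple of lines, which is
why the later thread-pool-min/thread-pool-max patch can add its knobs cheaply.
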
diff --git a/qapi/qom.json b/qapi/qom.json
41
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
52
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
53
--- a/qapi/qom.json
43
--- a/hw/block/dataplane/virtio-blk.c
54
+++ b/qapi/qom.json
44
+++ b/hw/block/dataplane/virtio-blk.c
55
@@ -XXX,XX +XXX,XX @@
45
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
56
'*repeat': 'bool',
46
57
'*grab-toggle': 'GrabToggleKeys' } }
47
memory_region_transaction_commit();
58
48
59
+##
49
- /*
60
+# @EventLoopBaseProperties:
50
- * These fields are visible to the IOThread so we rely on implicit barriers
61
+#
51
- * in aio_context_acquire() on the write side and aio_notify_accept() on
62
+# Common properties for event loops
52
- * the read side.
63
+#
53
- */
64
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
54
- s->starting = false;
65
+# 0 means that the engine will use its default.
55
- vblk->dataplane_started = true;
66
+# (default: 0)
56
trace_virtio_blk_data_plane_start(s);
67
+#
57
68
+# Since: 7.1
58
old_context = blk_get_aio_context(s->conf->conf.blk);
69
+##
59
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
70
+{ 'struct': 'EventLoopBaseProperties',
60
event_notifier_set(virtio_queue_get_host_notifier(vq));
71
+ 'data': { '*aio-max-batch': 'int' } }
61
}
62
63
+ /*
64
+ * These fields must be visible to the IOThread when it processes the
65
+ * virtqueue, otherwise it will think dataplane has not started yet.
66
+ *
67
+ * Make sure ->dataplane_started is false when blk_set_aio_context() is
68
+ * called above so that draining does not cause the host notifier to be
69
+ * detached/attached prematurely.
70
+ */
71
+ s->starting = false;
72
+ vblk->dataplane_started = true;
73
+ smp_wmb(); /* paired with aio_notify_accept() on the read side */
72
+
74
+
73
##
75
/* Get this show started by hooking up our callbacks */
74
# @IothreadProperties:
76
if (!blk_in_drain(s->conf->conf.blk)) {
75
#
77
aio_context_acquire(s->ctx);
76
@@ -XXX,XX +XXX,XX @@
78
@@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
77
# algorithm detects it is spending too long polling without
79
fail_guest_notifiers:
78
# encountering events. 0 selects a default behaviour (default: 0)
80
vblk->dataplane_disabled = true;
79
#
81
s->starting = false;
80
-# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
82
- vblk->dataplane_started = true;
81
-# 0 means that the engine will use its default
83
return -ENOSYS;
82
-# (default:0, since 6.1)
84
}
83
+# The @aio-max-batch option is available since 6.1.
85
84
#
86
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
85
# Since: 2.0
87
aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
86
##
88
}
87
{ 'struct': 'IothreadProperties',
89
88
+ 'base': 'EventLoopBaseProperties',
90
+ /*
89
'data': { '*poll-max-ns': 'int',
91
+ * Batch all the host notifiers in a single transaction to avoid
90
'*poll-grow': 'int',
92
+ * quadratic time complexity in address_space_update_ioeventfds().
91
- '*poll-shrink': 'int',
93
+ */
92
- '*aio-max-batch': 'int' } }
94
+ memory_region_transaction_begin();
93
+ '*poll-shrink': 'int' } }
94
95
##
96
# @MemoryBackendProperties:
97
diff --git a/meson.build b/meson.build
98
index XXXXXXX..XXXXXXX 100644
99
--- a/meson.build
100
+++ b/meson.build
101
@@ -XXX,XX +XXX,XX @@ subdir('qom')
102
subdir('authz')
103
subdir('crypto')
104
subdir('ui')
105
+subdir('hw')
106
107
108
if enable_modules
109
@@ -XXX,XX +XXX,XX @@ if enable_modules
110
modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO')
111
endif
112
113
+qom_ss = qom_ss.apply(config_host, strict: false)
114
+libqom = static_library('qom', qom_ss.sources() + genh,
115
+ dependencies: [qom_ss.dependencies()],
116
+ name_suffix: 'fa')
117
+qom = declare_dependency(link_whole: libqom)
118
+
95
+
119
+event_loop_base = files('event-loop-base.c')
96
+ for (i = 0; i < nvqs; i++) {
120
+event_loop_base = static_library('event-loop-base', sources: event_loop_base + genh,
97
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
121
+ build_by_default: true)
122
+event_loop_base = declare_dependency(link_whole: event_loop_base,
123
+ dependencies: [qom])
124
+
125
stub_ss = stub_ss.apply(config_all, strict: false)
126
127
util_ss.add_all(trace_ss)
128
@@ -XXX,XX +XXX,XX @@ subdir('monitor')
129
subdir('net')
130
subdir('replay')
131
subdir('semihosting')
132
-subdir('hw')
133
subdir('tcg')
134
subdir('fpu')
135
subdir('accel')
136
@@ -XXX,XX +XXX,XX @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms',
137
capture: true,
138
command: [undefsym, nm, '@INPUT@'])
139
140
-qom_ss = qom_ss.apply(config_host, strict: false)
141
-libqom = static_library('qom', qom_ss.sources() + genh,
142
- dependencies: [qom_ss.dependencies()],
143
- name_suffix: 'fa')
144
-
145
-qom = declare_dependency(link_whole: libqom)
146
-
147
authz_ss = authz_ss.apply(config_host, strict: false)
148
libauthz = static_library('authz', authz_ss.sources() + genh,
149
dependencies: [authz_ss.dependencies()],
150
@@ -XXX,XX +XXX,XX @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh,
151
build_by_default: false)
152
153
blockdev = declare_dependency(link_whole: [libblockdev],
154
- dependencies: [block])
155
+ dependencies: [block, event_loop_base])
156
157
qmp_ss = qmp_ss.apply(config_host, strict: false)
158
libqmp = static_library('qmp', qmp_ss.sources() + genh,
159
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
160
new file mode 100644
161
index XXXXXXX..XXXXXXX
162
--- /dev/null
163
+++ b/include/sysemu/event-loop-base.h
164
@@ -XXX,XX +XXX,XX @@
165
+/*
166
+ * QEMU event-loop backend
167
+ *
168
+ * Copyright (C) 2022 Red Hat Inc
169
+ *
170
+ * Authors:
171
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
172
+ *
173
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
174
+ * See the COPYING file in the top-level directory.
175
+ */
176
+#ifndef QEMU_EVENT_LOOP_BASE_H
177
+#define QEMU_EVENT_LOOP_BASE_H
178
+
179
+#include "qom/object.h"
180
+#include "block/aio.h"
181
+#include "qemu/typedefs.h"
182
+
183
+#define TYPE_EVENT_LOOP_BASE "event-loop-base"
184
+OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass,
185
+ EVENT_LOOP_BASE)
186
+
187
+struct EventLoopBaseClass {
188
+ ObjectClass parent_class;
189
+
190
+ void (*init)(EventLoopBase *base, Error **errp);
191
+ void (*update_params)(EventLoopBase *base, Error **errp);
192
+};
193
+
194
+struct EventLoopBase {
195
+ Object parent;
196
+
197
+ /* AioContext AIO engine parameters */
198
+ int64_t aio_max_batch;
199
+};
200
+#endif
201
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
202
index XXXXXXX..XXXXXXX 100644
203
--- a/include/sysemu/iothread.h
204
+++ b/include/sysemu/iothread.h
205
@@ -XXX,XX +XXX,XX @@
206
#include "block/aio.h"
207
#include "qemu/thread.h"
208
#include "qom/object.h"
209
+#include "sysemu/event-loop-base.h"
210
211
#define TYPE_IOTHREAD "iothread"
212
213
struct IOThread {
214
- Object parent_obj;
215
+ EventLoopBase parent_obj;
216
217
QemuThread thread;
218
AioContext *ctx;
219
@@ -XXX,XX +XXX,XX @@ struct IOThread {
220
int64_t poll_max_ns;
221
int64_t poll_grow;
222
int64_t poll_shrink;
223
-
224
- /* AioContext AIO engine parameters */
225
- int64_t aio_max_batch;
226
};
227
typedef struct IOThread IOThread;
228
229
diff --git a/event-loop-base.c b/event-loop-base.c
230
new file mode 100644
231
index XXXXXXX..XXXXXXX
232
--- /dev/null
233
+++ b/event-loop-base.c
234
@@ -XXX,XX +XXX,XX @@
235
+/*
236
+ * QEMU event-loop base
237
+ *
238
+ * Copyright (C) 2022 Red Hat Inc
239
+ *
240
+ * Authors:
241
+ * Stefan Hajnoczi <stefanha@redhat.com>
242
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
243
+ *
244
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
245
+ * See the COPYING file in the top-level directory.
246
+ */
247
+
248
+#include "qemu/osdep.h"
249
+#include "qom/object_interfaces.h"
250
+#include "qapi/error.h"
251
+#include "sysemu/event-loop-base.h"
252
+
253
+typedef struct {
254
+ const char *name;
255
+ ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
256
+} EventLoopBaseParamInfo;
257
+
258
+static EventLoopBaseParamInfo aio_max_batch_info = {
259
+ "aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
260
+};
261
+
262
+static void event_loop_base_get_param(Object *obj, Visitor *v,
263
+ const char *name, void *opaque, Error **errp)
264
+{
265
+ EventLoopBase *event_loop_base = EVENT_LOOP_BASE(obj);
266
+ EventLoopBaseParamInfo *info = opaque;
267
+ int64_t *field = (void *)event_loop_base + info->offset;
268
+
269
+ visit_type_int64(v, name, field, errp);
270
+}
271
+
272
+static void event_loop_base_set_param(Object *obj, Visitor *v,
273
+ const char *name, void *opaque, Error **errp)
274
+{
275
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(obj);
276
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
277
+ EventLoopBaseParamInfo *info = opaque;
278
+ int64_t *field = (void *)base + info->offset;
279
+ int64_t value;
280
+
281
+ if (!visit_type_int64(v, name, &value, errp)) {
282
+ return;
283
+ }
98
+ }
284
+
99
+
285
+ if (value < 0) {
100
+ /*
286
+ error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
101
+ * The transaction expects the ioeventfds to be open when it
287
+ info->name, INT64_MAX);
102
+ * commits. Do it now, before the cleanup loop.
288
+ return;
103
+ */
104
+ memory_region_transaction_commit();
105
+
106
+ for (i = 0; i < nvqs; i++) {
107
+ virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
289
+ }
108
+ }
290
+
109
+
291
+ *field = value;
110
+ /*
111
+ * Set ->dataplane_started to false before draining so that host notifiers
112
+ * are not detached/attached anymore.
113
+ */
114
+ vblk->dataplane_started = false;
292
+
115
+
293
+ if (bc->update_params) {
116
aio_context_acquire(s->ctx);
294
+ bc->update_params(base, errp);
117
295
+ }
118
/* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
296
+
119
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
297
+ return;
120
298
+}
121
aio_context_release(s->ctx);
299
+
122
300
+static void event_loop_base_complete(UserCreatable *uc, Error **errp)
123
- /*
301
+{
124
- * Batch all the host notifiers in a single transaction to avoid
302
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
125
- * quadratic time complexity in address_space_update_ioeventfds().
303
+ EventLoopBase *base = EVENT_LOOP_BASE(uc);
126
- */
304
+
127
- memory_region_transaction_begin();
305
+ if (bc->init) {
306
+ bc->init(base, errp);
307
+ }
308
+}
309
+
310
+static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
311
+{
312
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
313
+ ucc->complete = event_loop_base_complete;
314
+
315
+ object_class_property_add(klass, "aio-max-batch", "int",
316
+ event_loop_base_get_param,
317
+ event_loop_base_set_param,
318
+ NULL, &aio_max_batch_info);
319
+}
320
+
321
+static const TypeInfo event_loop_base_info = {
322
+ .name = TYPE_EVENT_LOOP_BASE,
323
+ .parent = TYPE_OBJECT,
324
+ .instance_size = sizeof(EventLoopBase),
325
+ .class_size = sizeof(EventLoopBaseClass),
326
+ .class_init = event_loop_base_class_init,
327
+ .abstract = true,
328
+ .interfaces = (InterfaceInfo[]) {
329
+ { TYPE_USER_CREATABLE },
330
+ { }
331
+ }
332
+};
333
+
334
+static void register_types(void)
335
+{
336
+ type_register_static(&event_loop_base_info);
337
+}
338
+type_init(register_types);
339
diff --git a/iothread.c b/iothread.c
340
index XXXXXXX..XXXXXXX 100644
341
--- a/iothread.c
342
+++ b/iothread.c
343
@@ -XXX,XX +XXX,XX @@
344
#include "qemu/module.h"
345
#include "block/aio.h"
346
#include "block/block.h"
347
+#include "sysemu/event-loop-base.h"
348
#include "sysemu/iothread.h"
349
#include "qapi/error.h"
350
#include "qapi/qapi-commands-misc.h"
351
@@ -XXX,XX +XXX,XX @@ static void iothread_init_gcontext(IOThread *iothread)
352
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
353
}
354
355
-static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
356
+static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
357
{
358
+ IOThread *iothread = IOTHREAD(base);
359
ERRP_GUARD();
360
361
+ if (!iothread->ctx) {
362
+ return;
363
+ }
364
+
365
aio_context_set_poll_params(iothread->ctx,
366
iothread->poll_max_ns,
367
iothread->poll_grow,
368
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
369
}
370
371
aio_context_set_aio_params(iothread->ctx,
372
- iothread->aio_max_batch,
373
+ iothread->parent_obj.aio_max_batch,
374
errp);
375
}
376
377
-static void iothread_complete(UserCreatable *obj, Error **errp)
378
+
379
+static void iothread_init(EventLoopBase *base, Error **errp)
380
{
381
Error *local_error = NULL;
382
- IOThread *iothread = IOTHREAD(obj);
383
+ IOThread *iothread = IOTHREAD(base);
384
char *thread_name;
385
386
iothread->stopping = false;
387
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
388
*/
389
iothread_init_gcontext(iothread);
390
391
- iothread_set_aio_context_params(iothread, &local_error);
392
+ iothread_set_aio_context_params(base, &local_error);
393
if (local_error) {
394
error_propagate(errp, local_error);
395
aio_context_unref(iothread->ctx);
396
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
397
* to inherit.
398
*/
399
thread_name = g_strdup_printf("IO %s",
400
- object_get_canonical_path_component(OBJECT(obj)));
401
+ object_get_canonical_path_component(OBJECT(base)));
402
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
403
iothread, QEMU_THREAD_JOINABLE);
404
g_free(thread_name);
405
@@ -XXX,XX +XXX,XX @@ static IOThreadParamInfo poll_grow_info = {
406
static IOThreadParamInfo poll_shrink_info = {
407
"poll-shrink", offsetof(IOThread, poll_shrink),
408
};
409
-static IOThreadParamInfo aio_max_batch_info = {
410
- "aio-max-batch", offsetof(IOThread, aio_max_batch),
411
-};
412
413
static void iothread_get_param(Object *obj, Visitor *v,
414
const char *name, IOThreadParamInfo *info, Error **errp)
415
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
416
}
417
}
418
419
-static void iothread_get_aio_param(Object *obj, Visitor *v,
420
- const char *name, void *opaque, Error **errp)
421
-{
422
- IOThreadParamInfo *info = opaque;
423
-
128
-
424
- iothread_get_param(obj, v, name, info, errp);
129
- for (i = 0; i < nvqs; i++) {
425
-}
130
- virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
426
-
427
-static void iothread_set_aio_param(Object *obj, Visitor *v,
428
- const char *name, void *opaque, Error **errp)
429
-{
430
- IOThread *iothread = IOTHREAD(obj);
431
- IOThreadParamInfo *info = opaque;
432
-
433
- if (!iothread_set_param(obj, v, name, info, errp)) {
434
- return;
435
- }
131
- }
436
-
132
-
437
- if (iothread->ctx) {
133
- /*
438
- aio_context_set_aio_params(iothread->ctx,
134
- * The transaction expects the ioeventfds to be open when it
439
- iothread->aio_max_batch,
135
- * commits. Do it now, before the cleanup loop.
440
- errp);
136
- */
137
- memory_region_transaction_commit();
138
-
139
- for (i = 0; i < nvqs; i++) {
140
- virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
441
- }
141
- }
442
-}
443
-
142
-
444
static void iothread_class_init(ObjectClass *klass, void *class_data)
143
qemu_bh_cancel(s->bh);
445
{
144
notify_guest_bh(s); /* final chance to notify guest */
446
- UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
145
447
- ucc->complete = iothread_complete;
146
/* Clean up guest notifier (irq) */
448
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);
147
k->set_guest_notifiers(qbus->parent, nvqs, false);
449
+
148
450
+ bc->init = iothread_init;
149
- vblk->dataplane_started = false;
451
+ bc->update_params = iothread_set_aio_context_params;
150
s->stopping = false;
452
453
object_class_property_add(klass, "poll-max-ns", "int",
454
iothread_get_poll_param,
455
@@ -XXX,XX +XXX,XX @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
456
iothread_get_poll_param,
457
iothread_set_poll_param,
458
NULL, &poll_shrink_info);
459
- object_class_property_add(klass, "aio-max-batch", "int",
460
- iothread_get_aio_param,
461
- iothread_set_aio_param,
462
- NULL, &aio_max_batch_info);
463
}
151
}
464
465
static const TypeInfo iothread_info = {
466
.name = TYPE_IOTHREAD,
467
- .parent = TYPE_OBJECT,
468
+ .parent = TYPE_EVENT_LOOP_BASE,
469
.class_init = iothread_class_init,
470
.instance_size = sizeof(IOThread),
471
.instance_init = iothread_instance_init,
472
.instance_finalize = iothread_instance_finalize,
473
- .interfaces = (InterfaceInfo[]) {
474
- {TYPE_USER_CREATABLE},
475
- {}
476
- },
477
};
478
479
static void iothread_register_types(void)
480
@@ -XXX,XX +XXX,XX @@ static int query_one_iothread(Object *object, void *opaque)
481
info->poll_max_ns = iothread->poll_max_ns;
482
info->poll_grow = iothread->poll_grow;
483
info->poll_shrink = iothread->poll_shrink;
484
- info->aio_max_batch = iothread->aio_max_batch;
485
+ info->aio_max_batch = iothread->parent_obj.aio_max_batch;
486
487
QAPI_LIST_APPEND(*tail, info);
488
return 0;
489
--
152
--
490
2.35.1
153
2.40.1
154
155
diff view generated by jsdifflib
Deleted patch
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>

'event-loop-base' provides basic property handling for all 'AioContext'
based event loops. So let's define a new 'MainLoopClass' that inherits
from it. This will permit tweaking the main loop's properties through
qapi as well as through the command line using the '-object' keyword[1].
Only one instance of 'MainLoopClass' may be created at any time.

'EventLoopBaseClass' learns a new callback, 'can_be_deleted()', so as to
mark 'MainLoop' as non-deletable.

[1] For example:
      -object main-loop,id=main-loop,aio-max-batch=<value>

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-3-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/qom.json                    | 13 ++++++++
 meson.build                      |  3 +-
 include/qemu/main-loop.h         | 10 ++++++
 include/sysemu/event-loop-base.h |  1 +
 event-loop-base.c                | 13 ++++++++
 util/main-loop.c                 | 56 ++++++++++++++++++++++++++++++++
 6 files changed, 95 insertions(+), 1 deletion(-)

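Filling in the example above with a concrete value (8 is purely illustrative;
0 keeps the AIO engine's default and negative values are rejected by the
property setter):

    -object main-loop,id=main-loop,aio-max-batch=8
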
diff --git a/qapi/qom.json b/qapi/qom.json
30
index XXXXXXX..XXXXXXX 100644
31
--- a/qapi/qom.json
32
+++ b/qapi/qom.json
33
@@ -XXX,XX +XXX,XX @@
34
'*poll-grow': 'int',
35
'*poll-shrink': 'int' } }
36
37
+##
38
+# @MainLoopProperties:
39
+#
40
+# Properties for the main-loop object.
41
+#
42
+# Since: 7.1
43
+##
44
+{ 'struct': 'MainLoopProperties',
45
+ 'base': 'EventLoopBaseProperties',
46
+ 'data': {} }
47
+
48
##
49
# @MemoryBackendProperties:
50
#
51
@@ -XXX,XX +XXX,XX @@
52
{ 'name': 'input-linux',
53
'if': 'CONFIG_LINUX' },
54
'iothread',
55
+ 'main-loop',
56
{ 'name': 'memory-backend-epc',
57
'if': 'CONFIG_LINUX' },
58
'memory-backend-file',
59
@@ -XXX,XX +XXX,XX @@
60
'input-linux': { 'type': 'InputLinuxProperties',
61
'if': 'CONFIG_LINUX' },
62
'iothread': 'IothreadProperties',
63
+ 'main-loop': 'MainLoopProperties',
64
'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties',
65
'if': 'CONFIG_LINUX' },
66
'memory-backend-file': 'MemoryBackendFileProperties',
67
diff --git a/meson.build b/meson.build
68
index XXXXXXX..XXXXXXX 100644
69
--- a/meson.build
70
+++ b/meson.build
71
@@ -XXX,XX +XXX,XX @@ libqemuutil = static_library('qemuutil',
72
sources: util_ss.sources() + stub_ss.sources() + genh,
73
dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, pixman])
74
qemuutil = declare_dependency(link_with: libqemuutil,
75
- sources: genh + version_res)
76
+ sources: genh + version_res,
77
+ dependencies: [event_loop_base])
78
79
if have_system or have_user
80
decodetree = generator(find_program('scripts/decodetree.py'),
81
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
82
index XXXXXXX..XXXXXXX 100644
83
--- a/include/qemu/main-loop.h
84
+++ b/include/qemu/main-loop.h
85
@@ -XXX,XX +XXX,XX @@
86
#define QEMU_MAIN_LOOP_H
87
88
#include "block/aio.h"
89
+#include "qom/object.h"
90
+#include "sysemu/event-loop-base.h"
91
92
#define SIG_IPI SIGUSR1
93
94
+#define TYPE_MAIN_LOOP "main-loop"
95
+OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP)
96
+
97
+struct MainLoop {
98
+ EventLoopBase parent_obj;
99
+};
100
+typedef struct MainLoop MainLoop;
101
+
102
/**
103
* qemu_init_main_loop: Set up the process so that it can run the main loop.
104
*
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/sysemu/event-loop-base.h
108
+++ b/include/sysemu/event-loop-base.h
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBaseClass {
110
111
void (*init)(EventLoopBase *base, Error **errp);
112
void (*update_params)(EventLoopBase *base, Error **errp);
113
+ bool (*can_be_deleted)(EventLoopBase *base);
114
};
115
116
struct EventLoopBase {
117
diff --git a/event-loop-base.c b/event-loop-base.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/event-loop-base.c
120
+++ b/event-loop-base.c
121
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_complete(UserCreatable *uc, Error **errp)
122
}
123
}
124
125
+static bool event_loop_base_can_be_deleted(UserCreatable *uc)
126
+{
127
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
128
+ EventLoopBase *backend = EVENT_LOOP_BASE(uc);
129
+
130
+ if (bc->can_be_deleted) {
131
+ return bc->can_be_deleted(backend);
132
+ }
133
+
134
+ return true;
135
+}
136
+
137
static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
138
{
139
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
140
ucc->complete = event_loop_base_complete;
141
+ ucc->can_be_deleted = event_loop_base_can_be_deleted;
142
143
object_class_property_add(klass, "aio-max-batch", "int",
144
event_loop_base_get_param,
145
diff --git a/util/main-loop.c b/util/main-loop.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/util/main-loop.c
148
+++ b/util/main-loop.c
149
@@ -XXX,XX +XXX,XX @@
150
#include "qemu/error-report.h"
151
#include "qemu/queue.h"
152
#include "qemu/compiler.h"
153
+#include "qom/object.h"
154
155
#ifndef _WIN32
156
#include <sys/wait.h>
157
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
158
return 0;
159
}
160
161
+static void main_loop_update_params(EventLoopBase *base, Error **errp)
162
+{
163
+ if (!qemu_aio_context) {
164
+ error_setg(errp, "qemu aio context not ready");
165
+ return;
166
+ }
167
+
168
+ aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
169
+}
170
+
171
+MainLoop *mloop;
172
+
173
+static void main_loop_init(EventLoopBase *base, Error **errp)
174
+{
175
+ MainLoop *m = MAIN_LOOP(base);
176
+
177
+ if (mloop) {
178
+ error_setg(errp, "only one main-loop instance allowed");
179
+ return;
180
+ }
181
+
182
+ main_loop_update_params(base, errp);
183
+
184
+ mloop = m;
185
+ return;
186
+}
187
+
188
+static bool main_loop_can_be_deleted(EventLoopBase *base)
189
+{
190
+ return false;
191
+}
192
+
193
+static void main_loop_class_init(ObjectClass *oc, void *class_data)
194
+{
195
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc);
196
+
197
+ bc->init = main_loop_init;
198
+ bc->update_params = main_loop_update_params;
199
+ bc->can_be_deleted = main_loop_can_be_deleted;
200
+}
201
+
202
+static const TypeInfo main_loop_info = {
203
+ .name = TYPE_MAIN_LOOP,
204
+ .parent = TYPE_EVENT_LOOP_BASE,
205
+ .class_init = main_loop_class_init,
206
+ .instance_size = sizeof(MainLoop),
207
+};
208
+
209
+static void main_loop_register_types(void)
210
+{
211
+ type_register_static(&main_loop_info);
212
+}
213
+
214
+type_init(main_loop_register_types)
215
+
216
static int max_priority;
217
218
#ifndef _WIN32
219
--
2.35.1
Deleted patch
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>

The thread pool regulates itself: when idle, it kills threads until
empty; when in demand, it creates new threads until full. This behaviour
doesn't play well with latency-sensitive workloads where the price of
creating a new thread is too high. For example, when paired with qemu's
'-mlock', or using safety features like SafeStack, creating a new thread
has been measured to take multiple milliseconds.

In order to mitigate this, let's introduce a new 'EventLoopBase'
property to set the thread pool size. The threads will be created during
the pool's initialization or upon updating the property's value, remain
available during its lifetime regardless of demand, and be destroyed upon
freeing it. A properly characterized workload will then be able to
configure the pool to avoid any latency spikes.

Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Acked-by: Markus Armbruster <armbru@redhat.com>
Message-id: 20220425075723.20019-4-nsaenzju@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/qom.json                    | 10 +++++-
 include/block/aio.h              | 10 ++++++
 include/block/thread-pool.h      |  3 ++
 include/sysemu/event-loop-base.h |  4 +++
 event-loop-base.c                | 23 +++++++++++++
 iothread.c                       |  3 ++
 util/aio-posix.c                 |  1 +
 util/async.c                     | 20 ++++++++++++
 util/main-loop.c                 |  9 ++++++
 util/thread-pool.c               | 55 +++++++++++++++++++++++++++---
 10 files changed, 133 insertions(+), 5 deletions(-)

35
diff --git a/qapi/qom.json b/qapi/qom.json
36
index XXXXXXX..XXXXXXX 100644
37
--- a/qapi/qom.json
38
+++ b/qapi/qom.json
39
@@ -XXX,XX +XXX,XX @@
40
# 0 means that the engine will use its default.
41
# (default: 0)
42
#
43
+# @thread-pool-min: minimum number of threads reserved in the thread pool
44
+# (default:0)
45
+#
46
+# @thread-pool-max: maximum number of threads the thread pool can contain
47
+# (default:64)
48
+#
49
# Since: 7.1
50
##
51
{ 'struct': 'EventLoopBaseProperties',
52
- 'data': { '*aio-max-batch': 'int' } }
53
+ 'data': { '*aio-max-batch': 'int',
54
+ '*thread-pool-min': 'int',
55
+ '*thread-pool-max': 'int' } }
56
57
##
58
# @IothreadProperties:
59
diff --git a/include/block/aio.h b/include/block/aio.h
60
index XXXXXXX..XXXXXXX 100644
61
--- a/include/block/aio.h
62
+++ b/include/block/aio.h
63
@@ -XXX,XX +XXX,XX @@ struct AioContext {
64
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
65
QEMUBH *co_schedule_bh;
66
67
+ int thread_pool_min;
68
+ int thread_pool_max;
69
/* Thread pool for performing work and receiving completion callbacks.
70
* Has its own locking.
71
*/
72
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
73
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
74
Error **errp);
75
76
+/**
77
+ * aio_context_set_thread_pool_params:
78
+ * @ctx: the aio context
79
+ * @min: min number of threads to have readily available in the thread pool
80
+ * @min: max number of threads the thread pool can contain
81
+ */
82
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
83
+ int64_t max, Error **errp);
84
#endif
85
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
86
index XXXXXXX..XXXXXXX 100644
87
--- a/include/block/thread-pool.h
88
+++ b/include/block/thread-pool.h
89
@@ -XXX,XX +XXX,XX @@
90
91
#include "block/block.h"
92
93
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
94
+
95
typedef int ThreadPoolFunc(void *opaque);
96
97
typedef struct ThreadPool ThreadPool;
98
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
99
int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
100
ThreadPoolFunc *func, void *arg);
101
void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
102
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
103
104
#endif
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/sysemu/event-loop-base.h
108
+++ b/include/sysemu/event-loop-base.h
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBase {
110
111
/* AioContext AIO engine parameters */
112
int64_t aio_max_batch;
113
+
114
+ /* AioContext thread pool parameters */
115
+ int64_t thread_pool_min;
116
+ int64_t thread_pool_max;
117
};
118
#endif
119
diff --git a/event-loop-base.c b/event-loop-base.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/event-loop-base.c
122
+++ b/event-loop-base.c
123
@@ -XXX,XX +XXX,XX @@
124
#include "qemu/osdep.h"
125
#include "qom/object_interfaces.h"
126
#include "qapi/error.h"
127
+#include "block/thread-pool.h"
128
#include "sysemu/event-loop-base.h"
129
130
typedef struct {
131
@@ -XXX,XX +XXX,XX @@ typedef struct {
132
ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
133
} EventLoopBaseParamInfo;
134
135
+static void event_loop_base_instance_init(Object *obj)
136
+{
137
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
138
+
139
+ base->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
140
+}
141
+
142
static EventLoopBaseParamInfo aio_max_batch_info = {
143
"aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
144
};
145
+static EventLoopBaseParamInfo thread_pool_min_info = {
146
+ "thread-pool-min", offsetof(EventLoopBase, thread_pool_min),
147
+};
148
+static EventLoopBaseParamInfo thread_pool_max_info = {
149
+ "thread-pool-max", offsetof(EventLoopBase, thread_pool_max),
150
+};
151
152
static void event_loop_base_get_param(Object *obj, Visitor *v,
153
const char *name, void *opaque, Error **errp)
154
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
155
event_loop_base_get_param,
156
event_loop_base_set_param,
157
NULL, &aio_max_batch_info);
158
+ object_class_property_add(klass, "thread-pool-min", "int",
159
+ event_loop_base_get_param,
160
+ event_loop_base_set_param,
161
+ NULL, &thread_pool_min_info);
162
+ object_class_property_add(klass, "thread-pool-max", "int",
163
+ event_loop_base_get_param,
164
+ event_loop_base_set_param,
165
+ NULL, &thread_pool_max_info);
166
}
167
168
static const TypeInfo event_loop_base_info = {
169
.name = TYPE_EVENT_LOOP_BASE,
170
.parent = TYPE_OBJECT,
171
.instance_size = sizeof(EventLoopBase),
172
+ .instance_init = event_loop_base_instance_init,
173
.class_size = sizeof(EventLoopBaseClass),
174
.class_init = event_loop_base_class_init,
175
.abstract = true,
176
diff --git a/iothread.c b/iothread.c
177
index XXXXXXX..XXXXXXX 100644
178
--- a/iothread.c
179
+++ b/iothread.c
180
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
181
aio_context_set_aio_params(iothread->ctx,
182
iothread->parent_obj.aio_max_batch,
183
errp);
184
+
185
+ aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
186
+ base->thread_pool_max, errp);
187
}
188
189
190
diff --git a/util/aio-posix.c b/util/aio-posix.c
191
index XXXXXXX..XXXXXXX 100644
192
--- a/util/aio-posix.c
193
+++ b/util/aio-posix.c
194
@@ -XXX,XX +XXX,XX @@
195
196
#include "qemu/osdep.h"
197
#include "block/block.h"
198
+#include "block/thread-pool.h"
199
#include "qemu/main-loop.h"
200
#include "qemu/rcu.h"
201
#include "qemu/rcu_queue.h"
202
diff --git a/util/async.c b/util/async.c
203
index XXXXXXX..XXXXXXX 100644
204
--- a/util/async.c
205
+++ b/util/async.c
206
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
207
208
ctx->aio_max_batch = 0;
209
210
+ ctx->thread_pool_min = 0;
211
+ ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
212
+
213
return ctx;
214
fail:
215
g_source_destroy(&ctx->source);
216
@@ -XXX,XX +XXX,XX @@ void qemu_set_current_aio_context(AioContext *ctx)
217
assert(!get_my_aiocontext());
218
set_my_aiocontext(ctx);
219
}
220
+
221
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
222
+ int64_t max, Error **errp)
223
+{
224
+
225
+ if (min > max || !max || min > INT_MAX || max > INT_MAX) {
226
+ error_setg(errp, "bad thread-pool-min/thread-pool-max values");
227
+ return;
228
+ }
229
+
230
+ ctx->thread_pool_min = min;
231
+ ctx->thread_pool_max = max;
232
+
233
+ if (ctx->thread_pool) {
234
+ thread_pool_update_params(ctx->thread_pool, ctx);
235
+ }
236
+}
237
diff --git a/util/main-loop.c b/util/main-loop.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/util/main-loop.c
240
+++ b/util/main-loop.c
241
@@ -XXX,XX +XXX,XX @@
242
#include "sysemu/replay.h"
243
#include "qemu/main-loop.h"
244
#include "block/aio.h"
245
+#include "block/thread-pool.h"
246
#include "qemu/error-report.h"
247
#include "qemu/queue.h"
248
#include "qemu/compiler.h"
249
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
250
251
static void main_loop_update_params(EventLoopBase *base, Error **errp)
252
{
253
+ ERRP_GUARD();
254
+
255
if (!qemu_aio_context) {
256
error_setg(errp, "qemu aio context not ready");
257
return;
258
}
259
260
aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
261
+ if (*errp) {
262
+ return;
263
+ }
264
+
265
+ aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
266
+ base->thread_pool_max, errp);
267
}
268
269
MainLoop *mloop;
270
diff --git a/util/thread-pool.c b/util/thread-pool.c
271
index XXXXXXX..XXXXXXX 100644
272
--- a/util/thread-pool.c
273
+++ b/util/thread-pool.c
274
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
275
QemuMutex lock;
276
QemuCond worker_stopped;
277
QemuSemaphore sem;
278
- int max_threads;
279
QEMUBH *new_thread_bh;
280
281
/* The following variables are only accessed from one AioContext. */
282
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
283
int new_threads; /* backlog of threads we need to create */
284
int pending_threads; /* threads created but not running yet */
285
bool stopping;
286
+ int min_threads;
287
+ int max_threads;
288
};
289
290
+static inline bool back_to_sleep(ThreadPool *pool, int ret)
291
+{
292
+ /*
293
+ * The semaphore timed out, we should exit the loop except when:
294
+ * - There is work to do, we raced with the signal.
295
+ * - The max threads threshold just changed, we raced with the signal.
296
+ * - The thread pool forces a minimum number of readily available threads.
297
+ */
298
+ if (ret == -1 && (!QTAILQ_EMPTY(&pool->request_list) ||
299
+ pool->cur_threads > pool->max_threads ||
300
+ pool->cur_threads <= pool->min_threads)) {
301
+ return true;
302
+ }
303
+
304
+ return false;
305
+}
306
+
307
static void *worker_thread(void *opaque)
308
{
309
ThreadPool *pool = opaque;
310
@@ -XXX,XX +XXX,XX @@ static void *worker_thread(void *opaque)
311
ret = qemu_sem_timedwait(&pool->sem, 10000);
312
qemu_mutex_lock(&pool->lock);
313
pool->idle_threads--;
314
- } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
315
- if (ret == -1 || pool->stopping) {
316
+ } while (back_to_sleep(pool, ret));
317
+ if (ret == -1 || pool->stopping ||
318
+ pool->cur_threads > pool->max_threads) {
319
break;
320
}
321
322
@@ -XXX,XX +XXX,XX @@ void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
323
thread_pool_submit_aio(pool, func, arg, NULL, NULL);
324
}
325
326
+void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
327
+{
328
+ qemu_mutex_lock(&pool->lock);
329
+
330
+ pool->min_threads = ctx->thread_pool_min;
331
+ pool->max_threads = ctx->thread_pool_max;
332
+
333
+ /*
334
+ * We either have to:
335
+ * - Increase the number of available threads until over the min_threads
336
+ * threshold.
337
+ * - Decrease the number of available threads until under the max_threads
338
+ * threshold.
339
+ * - Do nothing. The current number of threads falls in between the min and
340
+ * max thresholds. We'll let the pool manage itself.
341
+ */
342
+ for (int i = pool->cur_threads; i < pool->min_threads; i++) {
343
+ spawn_thread(pool);
344
+ }
345
+
346
+ for (int i = pool->cur_threads; i > pool->max_threads; i--) {
347
+ qemu_sem_post(&pool->sem);
348
+ }
349
+
350
+ qemu_mutex_unlock(&pool->lock);
351
+}
352
+
353
static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
354
{
355
if (!ctx) {
356
@@ -XXX,XX +XXX,XX @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
357
qemu_mutex_init(&pool->lock);
358
qemu_cond_init(&pool->worker_stopped);
359
qemu_sem_init(&pool->sem, 0);
360
- pool->max_threads = 64;
361
pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
362
363
QLIST_INIT(&pool->head);
364
QTAILQ_INIT(&pool->request_list);
365
+
366
+ thread_pool_update_params(pool, ctx);
367
}
368
369
ThreadPool *thread_pool_new(AioContext *ctx)
370
--
371
2.35.1
diff view generated by jsdifflib