The following changes since commit 801f3db7564dcce8a37a70833c0abe40ec19f8ce:

  Merge remote-tracking branch 'remotes/philmd/tags/kconfig-20210720' into staging (2021-07-20 19:30:28 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to d7ddd0a1618a75b31dc308bb37365ce1da972154:

  linux-aio: limit the batch size using `aio-max-batch` parameter (2021-07-21 13:47:50 +0100)

----------------------------------------------------------------
Pull request

Stefano's fix for the performance regression introduced by commit
2558cb8dd4 ("linux-aio: increasing MAX_EVENTS to a larger hardcoded
value").

----------------------------------------------------------------

Stefano Garzarella (3):
  iothread: generalize iothread_set_param/iothread_get_param
  iothread: add aio-max-batch parameter
  linux-aio: limit the batch size using `aio-max-batch` parameter

 qapi/misc.json            |  6 ++-
 qapi/qom.json             |  7 +++-
 include/block/aio.h       | 12 ++++++
 include/sysemu/iothread.h |  3 ++
 block/linux-aio.c         |  9 ++++-
 iothread.c                | 82 ++++++++++++++++++++++++++++++++++-----
 monitor/hmp-cmds.c        |  2 +
 util/aio-posix.c          | 12 ++++++
 util/aio-win32.c          |  5 +++
 util/async.c              |  2 +
 qemu-options.hx           |  8 +++-
 11 files changed, 134 insertions(+), 14 deletions(-)

--
2.31.1
From: Stefano Garzarella <sgarzare@redhat.com>

Changes in preparation for the next patches, where we add a new
parameter not related to the poll mechanism.

Let's add two new generic functions (iothread_set_param and
iothread_get_param) that we use to set and get IOThread
parameters.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20210721094211.69853-2-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 iothread.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static PollParamInfo poll_shrink_info = {
     "poll-shrink", offsetof(IOThread, poll_shrink),
 };
 
-static void iothread_get_poll_param(Object *obj, Visitor *v,
+static void iothread_get_param(Object *obj, Visitor *v,
                                     const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
@@ -XXX,XX +XXX,XX @@ static void iothread_get_poll_param(Object *obj, Visitor *v,
     visit_type_int64(v, name, field, errp);
 }
 
-static void iothread_set_poll_param(Object *obj, Visitor *v,
+static bool iothread_set_param(Object *obj, Visitor *v,
                                     const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
     int64_t value;
 
     if (!visit_type_int64(v, name, &value, errp)) {
-        return;
+        return false;
     }
 
     if (value < 0) {
         error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                    info->name, INT64_MAX);
-        return;
+        return false;
     }
 
     *field = value;
 
+    return true;
+}
+
+static void iothread_get_poll_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+
+    iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_poll_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+    IOThread *iothread = IOTHREAD(obj);
+
+    if (!iothread_set_param(obj, v, name, opaque, errp)) {
+        return;
+    }
+
     if (iothread->ctx) {
         aio_context_set_poll_params(iothread->ctx,
                                     iothread->poll_max_ns,
--
2.31.1
From: Stefano Garzarella <sgarzare@redhat.com>

The `aio-max-batch` parameter will be propagated to the AIO engines
and used to control the maximum number of queued requests.

When the number of queued requests reaches `aio-max-batch`, the
engine invokes the system call to forward the requests to the kernel.

This parameter allows us to cap the batch size and so bound the
latency that requests can accumulate while queued in the AIO engine.

If `aio-max-batch` is equal to 0 (the default value), the AIO engine
will use its default maximum batch size.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20210721094211.69853-3-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 qapi/misc.json            |  6 ++++-
 qapi/qom.json             |  7 ++++-
 include/block/aio.h       | 12 +++++++++
 include/sysemu/iothread.h |  3 +++
 iothread.c                | 55 +++++++++++++++++++++++++++++++++++----
 monitor/hmp-cmds.c        |  2 ++
 util/aio-posix.c          | 12 +++++++++
 util/aio-win32.c          |  5 ++++
 util/async.c              |  2 ++
 qemu-options.hx           |  8 ++++--
 10 files changed, 103 insertions(+), 9 deletions(-)

diff --git a/qapi/misc.json b/qapi/misc.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/misc.json
+++ b/qapi/misc.json
@@ -XXX,XX +XXX,XX @@
 # @poll-shrink: how many ns will be removed from polling time, 0 means that
 #               it's not configured (since 2.9)
 #
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
+#                 0 means that the engine will use its default (since 6.1)
+#
 # Since: 2.0
 ##
 { 'struct': 'IOThreadInfo',
@@ -XXX,XX +XXX,XX @@
            'thread-id': 'int',
            'poll-max-ns': 'int',
            'poll-grow': 'int',
-           'poll-shrink': 'int' } }
+           'poll-shrink': 'int',
+           'aio-max-batch': 'int' } }
 
 ##
 # @query-iothreads:
diff --git a/qapi/qom.json b/qapi/qom.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/qom.json
+++ b/qapi/qom.json
@@ -XXX,XX +XXX,XX @@
 #     algorithm detects it is spending too long polling without
 #     encountering events. 0 selects a default behaviour (default: 0)
 #
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
+#                 0 means that the engine will use its default
+#                 (default: 0, since 6.1)
+#
 # Since: 2.0
 ##
 { 'struct': 'IothreadProperties',
   'data': { '*poll-max-ns': 'int',
             '*poll-grow': 'int',
-            '*poll-shrink': 'int' } }
+            '*poll-shrink': 'int',
+            '*aio-max-batch': 'int' } }
 
 ##
 # @MemoryBackendProperties:
diff --git a/include/block/aio.h b/include/block/aio.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -XXX,XX +XXX,XX @@ struct AioContext {
     int64_t poll_grow;      /* polling time growth factor */
     int64_t poll_shrink;    /* polling time shrink factor */
 
+    /* AIO engine parameters */
+    int64_t aio_max_batch;  /* maximum number of requests in a batch */
+
     /*
      * List of handlers participating in userspace polling. Protected by
      * ctx->list_lock. Iterated and modified mostly by the event loop thread
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                  int64_t grow, int64_t shrink,
                                  Error **errp);
 
+/**
+ * aio_context_set_aio_params:
+ * @ctx: the aio context
+ * @max_batch: maximum number of requests in a batch, 0 means that the
+ *             engine will use its default
+ */
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+                                Error **errp);
+
 #endif
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -XXX,XX +XXX,XX @@ struct IOThread {
     int64_t poll_max_ns;
     int64_t poll_grow;
    int64_t poll_shrink;
+
+    /* AioContext AIO engine parameters */
+    int64_t aio_max_batch;
 };
 typedef struct IOThread IOThread;
 
diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static void iothread_init_gcontext(IOThread *iothread)
     iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
 }
 
+static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
+{
+    ERRP_GUARD();
+
+    aio_context_set_poll_params(iothread->ctx,
+                                iothread->poll_max_ns,
+                                iothread->poll_grow,
+                                iothread->poll_shrink,
+                                errp);
+    if (*errp) {
+        return;
+    }
+
+    aio_context_set_aio_params(iothread->ctx,
+                               iothread->aio_max_batch,
+                               errp);
+}
+
 static void iothread_complete(UserCreatable *obj, Error **errp)
 {
     Error *local_error = NULL;
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
      */
     iothread_init_gcontext(iothread);
 
-    aio_context_set_poll_params(iothread->ctx,
-                                iothread->poll_max_ns,
-                                iothread->poll_grow,
-                                iothread->poll_shrink,
-                                &local_error);
+    iothread_set_aio_context_params(iothread, &local_error);
     if (local_error) {
         error_propagate(errp, local_error);
         aio_context_unref(iothread->ctx);
@@ -XXX,XX +XXX,XX @@ static PollParamInfo poll_grow_info = {
 static PollParamInfo poll_shrink_info = {
     "poll-shrink", offsetof(IOThread, poll_shrink),
 };
+static PollParamInfo aio_max_batch_info = {
+    "aio-max-batch", offsetof(IOThread, aio_max_batch),
+};
 
 static void iothread_get_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
     }
 }
 
+static void iothread_get_aio_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+
+    iothread_get_param(obj, v, name, opaque, errp);
+}
+
+static void iothread_set_aio_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+    IOThread *iothread = IOTHREAD(obj);
+
+    if (!iothread_set_param(obj, v, name, opaque, errp)) {
+        return;
+    }
+
+    if (iothread->ctx) {
+        aio_context_set_aio_params(iothread->ctx,
+                                   iothread->aio_max_batch,
+                                   errp);
+    }
+}
+
 static void iothread_class_init(ObjectClass *klass, void *class_data)
 {
     UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
                               iothread_get_poll_param,
                               iothread_set_poll_param,
                               NULL, &poll_shrink_info);
+    object_class_property_add(klass, "aio-max-batch", "int",
+                              iothread_get_aio_param,
+                              iothread_set_aio_param,
+                              NULL, &aio_max_batch_info);
 }
 
 static const TypeInfo iothread_info = {
@@ -XXX,XX +XXX,XX @@ static int query_one_iothread(Object *object, void *opaque)
     info->poll_max_ns = iothread->poll_max_ns;
     info->poll_grow = iothread->poll_grow;
     info->poll_shrink = iothread->poll_shrink;
+    info->aio_max_batch = iothread->aio_max_batch;
 
     QAPI_LIST_APPEND(*tail, info);
     return 0;
diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c
index XXXXXXX..XXXXXXX 100644
--- a/monitor/hmp-cmds.c
+++ b/monitor/hmp-cmds.c
@@ -XXX,XX +XXX,XX @@ void hmp_info_iothreads(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "  poll-max-ns=%" PRId64 "\n", value->poll_max_ns);
         monitor_printf(mon, "  poll-grow=%" PRId64 "\n", value->poll_grow);
         monitor_printf(mon, "  poll-shrink=%" PRId64 "\n", value->poll_shrink);
+        monitor_printf(mon, "  aio-max-batch=%" PRId64 "\n",
+                       value->aio_max_batch);
     }
 
     qapi_free_IOThreadInfoList(info_list);
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
 
     aio_notify(ctx);
 }
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+                                Error **errp)
+{
+    /*
+     * No thread synchronization here, it doesn't matter if an incorrect value
+     * is used once.
+     */
+    ctx->aio_max_batch = max_batch;
+
+    aio_notify(ctx);
+}
diff --git a/util/aio-win32.c b/util/aio-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
         error_setg(errp, "AioContext polling is not implemented on Windows");
     }
 }
+
+void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
+                                Error **errp)
+{
+}
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
     ctx->poll_grow = 0;
     ctx->poll_shrink = 0;
 
+    ctx->aio_max_batch = 0;
+
     return ctx;
 fail:
     g_source_destroy(&ctx->source);
diff --git a/qemu-options.hx b/qemu-options.hx
index XXXXXXX..XXXXXXX 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -XXX,XX +XXX,XX @@ SRST
 
             CN=laptop.example.com,O=Example Home,L=London,ST=London,C=GB
 
-    ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink``
+    ``-object iothread,id=id,poll-max-ns=poll-max-ns,poll-grow=poll-grow,poll-shrink=poll-shrink,aio-max-batch=aio-max-batch``
         Creates a dedicated event loop thread that devices can be
         assigned to. This is known as an IOThread. By default device
         emulation happens in vCPU threads or the main event loop thread.
@@ -XXX,XX +XXX,XX @@ SRST
         the polling time when the algorithm detects it is spending too
         long polling without encountering events.
 
-        The polling parameters can be modified at run-time using the
+        The ``aio-max-batch`` parameter is the maximum number of requests
+        in a batch for the AIO engine, 0 means that the engine will use
+        its default.
+
+        The IOThread parameters can be modified at run-time using the
         ``qom-set`` command (where ``iothread1`` is the IOThread's
         ``id``):
 
--
2.31.1
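
As a usage sketch (not part of the series; the IOThread id and the values
below are illustrative, following the qemu-options.hx and HMP changes in this
patch), the new property can be set when the IOThread is created and adjusted
at run-time from the monitor:

  qemu-system-x86_64 -object iothread,id=iothread1,aio-max-batch=8 ...

  (qemu) qom-set /objects/iothread1 aio-max-batch 16
  (qemu) info iothreads

With this patch, ``info iothreads`` (and QMP ``query-iothreads``) reports the
current ``aio-max-batch`` value alongside the polling parameters.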
From: Stefano Garzarella <sgarzare@redhat.com>

When there are multiple queues attached to the same AIO context,
some requests may experience high latency, since in the worst case
the AIO engine queue is only flushed when it is full (MAX_EVENTS) or
when there are no more plugged queues.

Commit 2558cb8dd4 ("linux-aio: increasing MAX_EVENTS to a larger
hardcoded value") changed MAX_EVENTS from 128 to 1024, to increase
the number of in-flight requests. But this change also increased
the potential maximum batch to 1024 elements.

When there is a single queue attached to the AIO context, the issue
is mitigated by laio_io_unplug(), which flushes the queue every time
it is invoked, since no other queue can be plugged.

Let's use the new `aio-max-batch` IOThread parameter to mitigate
this issue, limiting the number of requests in a batch.

We also define a default value (32): this value was obtained by
running some benchmarks and represents a good tradeoff between the
latency increase while a request is queued and the cost of the
io_submit(2) system call.

Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Message-id: 20210721094211.69853-4-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/linux-aio.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/block/linux-aio.c b/block/linux-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -XXX,XX +XXX,XX @@
  */
 #define MAX_EVENTS 1024
 
+/* Maximum number of requests in a batch. (default value) */
+#define DEFAULT_MAX_BATCH 32
+
 struct qemu_laiocb {
     Coroutine *co;
     LinuxAioState *ctx;
@@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
     LinuxAioState *s = laiocb->ctx;
     struct iocb *iocbs = &laiocb->iocb;
     QEMUIOVector *qiov = laiocb->qiov;
+    int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;
+
+    /* limit the batch with the number of available events */
+    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);
 
     switch (type) {
     case QEMU_AIO_WRITE:
@@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
     s->io_q.in_queue++;
     if (!s->io_q.blocked &&
         (!s->io_q.plugged ||
-         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
+         s->io_q.in_queue >= max_batch)) {
         ioq_submit(s);
     }
 
--
2.31.1
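
To make the effect of the new limit concrete, here is a small standalone
sketch (not code from this patch; it only mirrors the two lines added to
laio_do_submit() above and reimplements QEMU's MIN_NON_ZERO() locally) that
computes the effective batch size for a few settings:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_EVENTS        1024   /* as in block/linux-aio.c */
#define DEFAULT_MAX_BATCH 32     /* default added by this patch */

/* same semantics as QEMU's MIN_NON_ZERO(): 0 means "no limit" */
static int64_t min_non_zero(int64_t a, int64_t b)
{
    return a == 0 ? b : (b == 0 ? a : (a < b ? a : b));
}

/* effective batch size for a given aio-max-batch setting and in-flight count */
static int64_t effective_batch(int64_t aio_max_batch, int64_t in_flight)
{
    int64_t max_batch = aio_max_batch ? aio_max_batch : DEFAULT_MAX_BATCH;

    return min_non_zero(MAX_EVENTS - in_flight, max_batch);
}

int main(void)
{
    printf("%" PRId64 "\n", effective_batch(0, 0));     /* 32: engine default */
    printf("%" PRId64 "\n", effective_batch(8, 0));     /* 8: user-set limit  */
    printf("%" PRId64 "\n", effective_batch(32, 1000)); /* 24: free slots cap */
    return 0;
}

With queue plugging in effect, ioq_submit() is now called as soon as
io_q.in_queue reaches this value, instead of waiting until in_flight +
in_queue reaches MAX_EVENTS.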