Old series cover letter:

The following changes since commit f58d9620aa4a514b1227074ff56eefd1334a6225:

  Merge remote-tracking branch 'remotes/rth/tags/pull-dt-20180326' into staging (2018-03-27 10:27:34 +0100)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to f5a53faad4bfbf1b86012a13055d2a1a774a42b6:

  MAINTAINERS: add include/block/aio-wait.h (2018-03-27 13:05:48 +0100)

----------------------------------------------------------------

----------------------------------------------------------------

Stefan Hajnoczi (4):
  queue: add QSIMPLEQ_PREPEND()
  coroutine: avoid co_queue_wakeup recursion
  coroutine: add test-aio coroutine queue chaining test case
  MAINTAINERS: add include/block/aio-wait.h

 MAINTAINERS                  |   1 +
 include/qemu/coroutine_int.h |   1 -
 include/qemu/queue.h         |   8 ++++
 block/io.c                   |   3 +-
 tests/test-aio.c             |  65 ++++++++++++++++++++-----
 util/qemu-coroutine-lock.c   |  34 -------------
 util/qemu-coroutine.c        | 110 +++++++++++++++++++++++--------------------
 7 files changed, 121 insertions(+), 101 deletions(-)

--
2.14.3

New series cover letter:

The following changes since commit ca61fa4b803e5d0abaf6f1ceb690f23bb78a4def:

  Merge remote-tracking branch 'remotes/quic/tags/pull-hex-20211006' into staging (2021-10-06 12:11:14 -0700)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 1cc7eada97914f090125e588497986f6f7900514:

  iothread: use IOThreadParamInfo in iothread_[set|get]_param() (2021-10-07 15:29:50 +0100)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Stefano Garzarella (2):
  iothread: rename PollParamInfo to IOThreadParamInfo
  iothread: use IOThreadParamInfo in iothread_[set|get]_param()

 iothread.c | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

--
2.31.1
diff view generated by jsdifflib
Deleted patch: queue: add QSIMPLEQ_PREPEND()
QSIMPLEQ_CONCAT(a, b) joins a = a + b. The new QSIMPLEQ_PREPEND(a, b)
API joins a = b + a.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20180322152834.12656-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/queue.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -XXX,XX +XXX,XX @@ struct {                                                \
     }                                                                   \
 } while (/*CONSTCOND*/0)
 
+#define QSIMPLEQ_PREPEND(head1, head2) do {                             \
+    if (!QSIMPLEQ_EMPTY((head2))) {                                     \
+        *(head2)->sqh_last = (head1)->sqh_first;                        \
+        (head1)->sqh_first = (head2)->sqh_first;                        \
+        QSIMPLEQ_INIT((head2));                                         \
+    }                                                                   \
+} while (/*CONSTCOND*/0)
+
 #define QSIMPLEQ_LAST(head, type, field)                                \
     (QSIMPLEQ_EMPTY((head)) ?                                           \
         NULL :                                                          \
--
2.14.3
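For illustration only (not part of the patch), a minimal usage sketch of the new macro; it assumes QEMU's include/qemu/queue.h, and the Item type and list names are hypothetical:

#include "qemu/queue.h"

typedef struct Item {
    int value;
    QSIMPLEQ_ENTRY(Item) next;
} Item;

static QSIMPLEQ_HEAD(, Item) list_a = QSIMPLEQ_HEAD_INITIALIZER(list_a);
static QSIMPLEQ_HEAD(, Item) list_b = QSIMPLEQ_HEAD_INITIALIZER(list_b);

static void prepend_example(Item *x, Item *y)
{
    QSIMPLEQ_INSERT_TAIL(&list_a, x, next);   /* list_a = [x] */
    QSIMPLEQ_INSERT_TAIL(&list_b, y, next);   /* list_b = [y] */

    /* a = b + a: list_a becomes [y, x] and list_b is reinitialized to empty */
    QSIMPLEQ_PREPEND(&list_a, &list_b);
}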
diff view generated by jsdifflib
Old series patch (MAINTAINERS: add include/block/aio-wait.h):

The include/block/aio-wait.h header file was added by commit
7719f3c968c59e1bcda7e177679dc765b59e578f ("block: extract
AIO_WAIT_WHILE() from BlockDriverState") without updating MAINTAINERS.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20180312132204.23683-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: util/aio-*.c
 F: block/io.c
 F: migration/block*
 F: include/block/aio.h
+F: include/block/aio-wait.h
 F: scripts/qemugdb/aio.py
 T: git git://github.com/stefanha/qemu.git block

--
2.14.3

New series patch (iothread: rename PollParamInfo to IOThreadParamInfo):

From: Stefano Garzarella <sgarzare@redhat.com>

Commit 1793ad0247 ("iothread: add aio-max-batch parameter") added
a new parameter (aio-max-batch) to IOThread and used PollParamInfo
structure to handle it.

Since it is not a parameter of the polling mechanism, we rename the
structure to a more generic IOThreadParamInfo.

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210727145936.147032-2-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 iothread.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
 typedef struct {
     const char *name;
     ptrdiff_t offset; /* field's byte offset in IOThread struct */
-} PollParamInfo;
+} IOThreadParamInfo;
 
-static PollParamInfo poll_max_ns_info = {
+static IOThreadParamInfo poll_max_ns_info = {
     "poll-max-ns", offsetof(IOThread, poll_max_ns),
 };
-static PollParamInfo poll_grow_info = {
+static IOThreadParamInfo poll_grow_info = {
     "poll-grow", offsetof(IOThread, poll_grow),
 };
-static PollParamInfo poll_shrink_info = {
+static IOThreadParamInfo poll_shrink_info = {
     "poll-shrink", offsetof(IOThread, poll_shrink),
 };
-static PollParamInfo aio_max_batch_info = {
+static IOThreadParamInfo aio_max_batch_info = {
     "aio-max-batch", offsetof(IOThread, aio_max_batch),
 };
 
@@ -XXX,XX +XXX,XX @@ static void iothread_get_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
-    PollParamInfo *info = opaque;
+    IOThreadParamInfo *info = opaque;
     int64_t *field = (void *)iothread + info->offset;
 
     visit_type_int64(v, name, field, errp);
@@ -XXX,XX +XXX,XX @@ static bool iothread_set_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
-    PollParamInfo *info = opaque;
+    IOThreadParamInfo *info = opaque;
     int64_t *field = (void *)iothread + info->offset;
     int64_t value;

--
2.31.1
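The rename keeps the existing name/offset lookup pattern. As a standalone illustration only (not QEMU code; Thread, ParamInfo and get_param are hypothetical names), that pattern looks roughly like this:

#include <stddef.h>
#include <inttypes.h>
#include <stdio.h>

typedef struct {
    int64_t poll_max_ns;
    int64_t aio_max_batch;
} Thread;                      /* stand-in for QEMU's IOThread */

typedef struct {
    const char *name;
    ptrdiff_t offset;          /* field's byte offset in Thread */
} ParamInfo;                   /* stand-in for IOThreadParamInfo */

static const ParamInfo aio_max_batch_info = {
    "aio-max-batch", offsetof(Thread, aio_max_batch),
};

/* One getter serves every parameter by reaching the field via its offset. */
static int64_t get_param(const Thread *t, const ParamInfo *info)
{
    return *(const int64_t *)((const char *)t + info->offset);
}

int main(void)
{
    Thread t = { .poll_max_ns = 32000, .aio_max_batch = 16 };

    printf("%s = %" PRId64 "\n", aio_max_batch_info.name,
           get_param(&t, &aio_max_batch_info));
    return 0;
}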
diff view generated by jsdifflib
Old series patch (coroutine: avoid co_queue_wakeup recursion):

qemu_aio_coroutine_enter() is (indirectly) called recursively when
processing co_queue_wakeup. This can lead to stack exhaustion.

This patch rewrites co_queue_wakeup in an iterative fashion (instead of
recursive) with bounded memory usage to prevent stack exhaustion.

qemu_co_queue_run_restart() is inlined into qemu_aio_coroutine_enter()
and the qemu_coroutine_enter() call is turned into a loop to avoid
recursion.

There is one change that is worth mentioning: Previously, when
coroutine A queued coroutine B, qemu_co_queue_run_restart() entered
coroutine B from coroutine A. If A was terminating then it would still
stay alive until B yielded. After this patch B is entered by A's parent
so that A can be deleted immediately if it is terminating.

It is safe to make this change since B could never interact with A if it
was terminating anyway.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20180322152834.12656-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/coroutine_int.h |   1 -
 block/io.c                   |   3 +-
 util/qemu-coroutine-lock.c   |  34 -------------
 util/qemu-coroutine.c        | 110 +++++++++++++++++++++++--------------------
 4 files changed, 60 insertions(+), 88 deletions(-)

diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine_int.h
+++ b/include/qemu/coroutine_int.h
@@ -XXX,XX +XXX,XX @@ Coroutine *qemu_coroutine_new(void);
 void qemu_coroutine_delete(Coroutine *co);
 CoroutineAction qemu_coroutine_switch(Coroutine *from, Coroutine *to,
                                       CoroutineAction action);
-void coroutine_fn qemu_co_queue_run_restart(Coroutine *co);
 
 #endif
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
     BdrvCoDrainData data;
 
     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
-     * other coroutines run if they were queued from
-     * qemu_co_queue_run_restart(). */
+     * other coroutines run if they were queued by aio_co_enter(). */
 
     assert(qemu_in_coroutine());
     data = (BdrvCoDrainData) {
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
     }
 }
 
-/**
- * qemu_co_queue_run_restart:
- *
- * Enter each coroutine that was previously marked for restart by
- * qemu_co_queue_next() or qemu_co_queue_restart_all(). This function is
- * invoked by the core coroutine code when the current coroutine yields or
- * terminates.
- */
-void qemu_co_queue_run_restart(Coroutine *co)
-{
-    Coroutine *next;
-    QSIMPLEQ_HEAD(, Coroutine) tmp_queue_wakeup =
-        QSIMPLEQ_HEAD_INITIALIZER(tmp_queue_wakeup);
-
-    trace_qemu_co_queue_run_restart(co);
-
-    /* Because "co" has yielded, any coroutine that we wakeup can resume it.
-     * If this happens and "co" terminates, co->co_queue_wakeup becomes
-     * invalid memory. Therefore, use a temporary queue and do not touch
-     * the "co" coroutine as soon as you enter another one.
-     *
-     * In its turn resumed "co" can populate "co_queue_wakeup" queue with
-     * new coroutines to be woken up. The caller, who has resumed "co",
-     * will be responsible for traversing the same queue, which may cause
-     * a different wakeup order but not any missing wakeups.
-     */
-    QSIMPLEQ_CONCAT(&tmp_queue_wakeup, &co->co_queue_wakeup);
-
-    while ((next = QSIMPLEQ_FIRST(&tmp_queue_wakeup))) {
-        QSIMPLEQ_REMOVE_HEAD(&tmp_queue_wakeup, co_queue_next);
-        qemu_coroutine_enter(next);
-    }
-}
-
 static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
 {
     Coroutine *next;
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_delete(Coroutine *co)
 
 void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
 {
-    Coroutine *self = qemu_coroutine_self();
-    CoroutineAction ret;
-
-    /* Cannot rely on the read barrier for co in aio_co_wake(), as there are
-     * callers outside of aio_co_wake() */
-    const char *scheduled = atomic_mb_read(&co->scheduled);
-
-    trace_qemu_aio_coroutine_enter(ctx, self, co, co->entry_arg);
-
-    /* if the Coroutine has already been scheduled, entering it again will
-     * cause us to enter it twice, potentially even after the coroutine has
-     * been deleted */
-    if (scheduled) {
-        fprintf(stderr,
-                "%s: Co-routine was already scheduled in '%s'\n",
-                __func__, scheduled);
-        abort();
-    }
-
-    if (co->caller) {
-        fprintf(stderr, "Co-routine re-entered recursively\n");
-        abort();
-    }
-
-    co->caller = self;
-    co->ctx = ctx;
-
-    /* Store co->ctx before anything that stores co. Matches
-     * barrier in aio_co_wake and qemu_co_mutex_wake.
-     */
-    smp_wmb();
-
-    ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
-
-    qemu_co_queue_run_restart(co);
-
-    /* Beware, if ret == COROUTINE_YIELD and qemu_co_queue_run_restart()
-     * has started any other coroutine, "co" might have been reentered
-     * and even freed by now! So be careful and do not touch it.
-     */
-
-    switch (ret) {
-    case COROUTINE_YIELD:
-        return;
-    case COROUTINE_TERMINATE:
-        assert(!co->locks_held);
-        trace_qemu_coroutine_terminate(co);
-        coroutine_delete(co);
-        return;
-    default:
-        abort();
+    QSIMPLEQ_HEAD(, Coroutine) pending = QSIMPLEQ_HEAD_INITIALIZER(pending);
+    Coroutine *from = qemu_coroutine_self();
+
+    QSIMPLEQ_INSERT_TAIL(&pending, co, co_queue_next);
+
+    /* Run co and any queued coroutines */
+    while (!QSIMPLEQ_EMPTY(&pending)) {
+        Coroutine *to = QSIMPLEQ_FIRST(&pending);
+        CoroutineAction ret;
+
+        /* Cannot rely on the read barrier for to in aio_co_wake(), as there are
+         * callers outside of aio_co_wake() */
+        const char *scheduled = atomic_mb_read(&to->scheduled);
+
+        QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next);
+
+        trace_qemu_aio_coroutine_enter(ctx, from, to, to->entry_arg);
+
+        /* if the Coroutine has already been scheduled, entering it again will
+         * cause us to enter it twice, potentially even after the coroutine has
+         * been deleted */
+        if (scheduled) {
+            fprintf(stderr,
+                    "%s: Co-routine was already scheduled in '%s'\n",
+                    __func__, scheduled);
+            abort();
+        }
+
+        if (to->caller) {
+            fprintf(stderr, "Co-routine re-entered recursively\n");
+            abort();
+        }
+
+        to->caller = from;
+        to->ctx = ctx;
+
+        /* Store to->ctx before anything that stores to. Matches
+         * barrier in aio_co_wake and qemu_co_mutex_wake.
+         */
+        smp_wmb();
+
+        ret = qemu_coroutine_switch(from, to, COROUTINE_ENTER);
+
+        /* Queued coroutines are run depth-first; previously pending coroutines
+         * run after those queued more recently.
+         */
+        QSIMPLEQ_PREPEND(&pending, &to->co_queue_wakeup);
+
+        switch (ret) {
+        case COROUTINE_YIELD:
+            break;
+        case COROUTINE_TERMINATE:
+            assert(!to->locks_held);
+            trace_qemu_coroutine_terminate(to);
+            coroutine_delete(to);
+            break;
+        default:
+            abort();
+        }
     }
 }
 
--
2.14.3

New series patch (iothread: use IOThreadParamInfo in iothread_[set|get]_param()):

From: Stefano Garzarella <sgarzare@redhat.com>

Commit 0445409d74 ("iothread: generalize
iothread_set_param/iothread_get_param") moved common code to set and
get IOThread parameters in two new functions.

These functions are called inside callbacks, so we don't need to use an
opaque pointer. Let's replace the `void *opaque` parameter with
`IOThreadParamInfo *info`.

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210727145936.147032-3-sgarzare@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 iothread.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static IOThreadParamInfo aio_max_batch_info = {
 };
 
 static void iothread_get_param(Object *obj, Visitor *v,
-        const char *name, void *opaque, Error **errp)
+        const char *name, IOThreadParamInfo *info, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
-    IOThreadParamInfo *info = opaque;
     int64_t *field = (void *)iothread + info->offset;
 
     visit_type_int64(v, name, field, errp);
 }
 
 static bool iothread_set_param(Object *obj, Visitor *v,
-        const char *name, void *opaque, Error **errp)
+        const char *name, IOThreadParamInfo *info, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
-    IOThreadParamInfo *info = opaque;
     int64_t *field = (void *)iothread + info->offset;
     int64_t value;
 
@@ -XXX,XX +XXX,XX @@ static bool iothread_set_param(Object *obj, Visitor *v,
 static void iothread_get_poll_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
+    IOThreadParamInfo *info = opaque;
 
-    iothread_get_param(obj, v, name, opaque, errp);
+    iothread_get_param(obj, v, name, info, errp);
 }
 
 static void iothread_set_poll_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
+    IOThreadParamInfo *info = opaque;
 
-    if (!iothread_set_param(obj, v, name, opaque, errp)) {
+    if (!iothread_set_param(obj, v, name, info, errp)) {
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
 static void iothread_get_aio_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
+    IOThreadParamInfo *info = opaque;
 
-    iothread_get_param(obj, v, name, opaque, errp);
+    iothread_get_param(obj, v, name, info, errp);
 }
 
 static void iothread_set_aio_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
 {
     IOThread *iothread = IOTHREAD(obj);
+    IOThreadParamInfo *info = opaque;
 
-    if (!iothread_set_param(obj, v, name, opaque, errp)) {
+    if (!iothread_set_param(obj, v, name, info, errp)) {
         return;
     }

--
2.31.1
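As a rough standalone illustration of the iterative approach the coroutine patch describes (not QEMU code; Task, run_task and enter are hypothetical names, and a fixed-size array stands in for the QSIMPLEQ of pending coroutines), the recursion-to-iteration idea looks like this:

#include <stdio.h>

#define MAX_WAKEUPS 8
#define MAX_PENDING 64

typedef struct Task {
    const char *name;
    struct Task *wakeup[MAX_WAKEUPS];  /* tasks this task wants to wake */
    int nwakeup;
} Task;

static void run_task(Task *t)
{
    /* placeholder for "switch to the coroutine and let it run" */
    printf("running %s\n", t->name);
}

static void enter(Task *first)
{
    Task *pending[MAX_PENDING];
    int n = 0;

    pending[n++] = first;

    /* Drain the pending list in a loop. Newly woken tasks are queued onto
     * the same local list instead of being entered recursively, so the call
     * stack stays shallow no matter how long the wakeup chain is. */
    while (n > 0) {
        Task *t = pending[--n];

        run_task(t);

        for (int i = 0; i < t->nwakeup && n < MAX_PENDING; i++) {
            pending[n++] = t->wakeup[i];
        }
    }
}

int main(void)
{
    Task c = { .name = "C" };
    Task b = { .name = "B", .wakeup = { &c }, .nwakeup = 1 };
    Task a = { .name = "A", .wakeup = { &b }, .nwakeup = 1 };

    enter(&a);
    return 0;
}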
diff view generated by jsdifflib
Deleted patch: coroutine: add test-aio coroutine queue chaining test case
Check that two coroutines can queue each other repeatedly without
hitting stack exhaustion.

Switch to qemu_init_main_loop() in main() because coroutines use
qemu_get_aio_context() - they don't know about test-aio's ctx variable.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20180322152834.12656-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/test-aio.c | 65 ++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 52 insertions(+), 13 deletions(-)

diff --git a/tests/test-aio.c b/tests/test-aio.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/timer.h"
 #include "qemu/sockets.h"
 #include "qemu/error-report.h"
+#include "qemu/coroutine.h"
+#include "qemu/main-loop.h"
 
 static AioContext *ctx;
 
@@ -XXX,XX +XXX,XX @@ static void test_source_timer_schedule(void)
     timer_del(&data.timer);
 }
 
+/*
+ * Check that aio_co_enter() can chain many times
+ *
+ * Two coroutines should be able to invoke each other via aio_co_enter() many
+ * times without hitting a limit like stack exhaustion. In other words, the
+ * calls should be chained instead of nested.
+ */
+
+typedef struct {
+    Coroutine *other;
+    unsigned i;
+    unsigned max;
+} ChainData;
+
+static void coroutine_fn chain(void *opaque)
+{
+    ChainData *data = opaque;
+
+    for (data->i = 0; data->i < data->max; data->i++) {
+        /* Queue up the other coroutine... */
+        aio_co_enter(ctx, data->other);
+
+        /* ...and give control to it */
+        qemu_coroutine_yield();
+    }
+}
+
+static void test_queue_chaining(void)
+{
+    /* This number of iterations hit stack exhaustion in the past: */
+    ChainData data_a = { .max = 25000 };
+    ChainData data_b = { .max = 25000 };
+
+    data_b.other = qemu_coroutine_create(chain, &data_a);
+    data_a.other = qemu_coroutine_create(chain, &data_b);
+
+    qemu_coroutine_enter(data_b.other);
+
+    g_assert_cmpint(data_a.i, ==, data_a.max);
+    g_assert_cmpint(data_b.i, ==, data_b.max - 1);
+
+    /* Allow the second coroutine to terminate */
+    qemu_coroutine_enter(data_a.other);
+
+    g_assert_cmpint(data_b.i, ==, data_b.max);
+}
 
 /* End of tests. */
 
 int main(int argc, char **argv)
 {
-    Error *local_error = NULL;
-    GSource *src;
-
-    init_clocks(NULL);
-
-    ctx = aio_context_new(&local_error);
-    if (!ctx) {
-        error_reportf_err(local_error, "Failed to create AIO Context: ");
-        exit(1);
-    }
-    src = aio_get_g_source(ctx);
-    g_source_attach(src, NULL);
-    g_source_unref(src);
+    qemu_init_main_loop(&error_fatal);
+    ctx = qemu_get_aio_context();
 
     while (g_main_context_iteration(NULL, false));
 
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
     g_test_add_func("/aio/external-client", test_aio_external_client);
     g_test_add_func("/aio/timer/schedule", test_timer_schedule);
 
+    g_test_add_func("/aio/coroutine/queue-chaining", test_queue_chaining);
+
     g_test_add_func("/aio-gsource/flush", test_source_flush);
     g_test_add_func("/aio-gsource/bh/schedule", test_source_bh_schedule);
     g_test_add_func("/aio-gsource/bh/schedule10", test_source_bh_schedule10);
--
2.14.3
diff view generated by jsdifflib