The following changes since commit f5fe7c17ac4e309e47e78f0f9761aebc8d2f2c81:

  Merge tag 'pull-tcg-20230823-2' of https://gitlab.com/rth7680/qemu into staging (2023-08-28 16:07:04 -0400)

are available in the Git repository at:

  https://gitlab.com/hreitz/qemu.git tags/pull-block-2023-09-01

for you to fetch changes up to 380448464dd89291cf7fd7434be6c225482a334d:

  tests/file-io-error: New test (2023-08-29 13:01:24 +0200)

----------------------------------------------------------------
Block patches

- Fix for file-posix's zoning code crashing on I/O errors
- Throttling refactoring

----------------------------------------------------------------
Hanna Czenczek (5):
      file-posix: Clear bs->bl.zoned on error
      file-posix: Check bs->bl.zoned for zone info
      file-posix: Fix zone update in I/O error path
      file-posix: Simplify raw_co_prw's 'out' zone code
      tests/file-io-error: New test

Zhenwei Pi (9):
      throttle: introduce enum ThrottleDirection
      test-throttle: use enum ThrottleDirection
      throttle: support read-only and write-only
      test-throttle: test read only and write only
      cryptodev: use NULL throttle timer cb for read direction
      throttle: use enum ThrottleDirection instead of bool is_write
      throttle: use THROTTLE_MAX/ARRAY_SIZE for hard code
      fsdev: Use ThrottleDirection instread of bool is_write
      block/throttle-groups: Use ThrottleDirection instread of bool is_write

 fsdev/qemu-fsdev-throttle.h                |   4 +-
 include/block/throttle-groups.h            |   6 +-
 include/qemu/throttle.h                    |  16 +-
 backends/cryptodev.c                       |  12 +-
 block/block-backend.c                      |   4 +-
 block/file-posix.c                         |  42 +++---
 block/throttle-groups.c                    | 163 +++++++++----------
 block/throttle.c                           |   8 +-
 fsdev/qemu-fsdev-throttle.c                |  18 ++-
 hw/9pfs/cofile.c                           |   4 +-
 tests/unit/test-throttle.c                 |  76 +++++++++-
 util/throttle.c                            |  84 +++++++----
 tests/qemu-iotests/tests/file-io-error     | 119 +++++++++++++++
 tests/qemu-iotests/tests/file-io-error.out |  33 +++++
 14 files changed, 418 insertions(+), 171 deletions(-)
 create mode 100755 tests/qemu-iotests/tests/file-io-error
 create mode 100644 tests/qemu-iotests/tests/file-io-error.out

--
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

Use enum ThrottleDirection instead of number index.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-2-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 include/qemu/throttle.h | 11 ++++++++---
 util/throttle.c         | 16 +++++++++-------
 2 files changed, 17 insertions(+), 10 deletions(-)
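For illustration, a minimal self-contained C sketch of the pattern this patch
introduces (simplified stand-in types and callbacks, not the actual QEMU
ThrottleTimers/QEMUTimer API): timers and their callbacks are indexed by a
named direction instead of the magic indices 0 and 1.

#include <stdio.h>

typedef enum {
    THROTTLE_READ = 0,
    THROTTLE_WRITE,
    THROTTLE_MAX
} ThrottleDirection;

typedef void TimerCB(void *opaque);

static void read_cb(void *opaque)  { puts("read timer fired"); }
static void write_cb(void *opaque) { puts("write timer fired"); }

typedef struct {
    /* one slot per direction, replacing separate read/write callback fields */
    TimerCB *timer_cb[THROTTLE_MAX];
} Timers;

int main(void)
{
    Timers tt = { .timer_cb = { [THROTTLE_READ]  = read_cb,
                                [THROTTLE_WRITE] = write_cb } };

    /* iterate by direction instead of a hard-coded 0..1 loop */
    for (ThrottleDirection dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
        tt.timer_cb[dir](NULL);
    }
    return 0;
}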
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/qemu/throttle.h
18
+++ b/include/qemu/throttle.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct ThrottleState {
20
int64_t previous_leak; /* timestamp of the last leak done */
21
} ThrottleState;
22
23
+typedef enum {
24
+ THROTTLE_READ = 0,
25
+ THROTTLE_WRITE,
26
+ THROTTLE_MAX
27
+} ThrottleDirection;
28
+
29
typedef struct ThrottleTimers {
30
- QEMUTimer *timers[2]; /* timers used to do the throttling */
31
+ QEMUTimer *timers[THROTTLE_MAX]; /* timers used to do the throttling */
32
QEMUClockType clock_type; /* the clock used */
33
34
/* Callbacks */
35
- QEMUTimerCB *read_timer_cb;
36
- QEMUTimerCB *write_timer_cb;
37
+ QEMUTimerCB *timer_cb[THROTTLE_MAX];
38
void *timer_opaque;
39
} ThrottleTimers;
40
41
diff --git a/util/throttle.c b/util/throttle.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/util/throttle.c
44
+++ b/util/throttle.c
45
@@ -XXX,XX +XXX,XX @@ static bool throttle_compute_timer(ThrottleState *ts,
46
void throttle_timers_attach_aio_context(ThrottleTimers *tt,
47
AioContext *new_context)
48
{
49
- tt->timers[0] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
50
- tt->read_timer_cb, tt->timer_opaque);
51
- tt->timers[1] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
52
- tt->write_timer_cb, tt->timer_opaque);
53
+ tt->timers[THROTTLE_READ] =
54
+ aio_timer_new(new_context, tt->clock_type, SCALE_NS,
55
+ tt->timer_cb[THROTTLE_READ], tt->timer_opaque);
56
+ tt->timers[THROTTLE_WRITE] =
57
+ aio_timer_new(new_context, tt->clock_type, SCALE_NS,
58
+ tt->timer_cb[THROTTLE_WRITE], tt->timer_opaque);
59
}
60
61
/*
62
@@ -XXX,XX +XXX,XX @@ void throttle_timers_init(ThrottleTimers *tt,
63
memset(tt, 0, sizeof(ThrottleTimers));
64
65
tt->clock_type = clock_type;
66
- tt->read_timer_cb = read_timer_cb;
67
- tt->write_timer_cb = write_timer_cb;
68
+ tt->timer_cb[THROTTLE_READ] = read_timer_cb;
69
+ tt->timer_cb[THROTTLE_WRITE] = write_timer_cb;
70
tt->timer_opaque = timer_opaque;
71
throttle_timers_attach_aio_context(tt, aio_context);
72
}
73
@@ -XXX,XX +XXX,XX @@ void throttle_timers_detach_aio_context(ThrottleTimers *tt)
74
{
75
int i;
76
77
- for (i = 0; i < 2; i++) {
78
+ for (i = 0; i < THROTTLE_MAX; i++) {
79
throttle_timer_destroy(&tt->timers[i]);
80
}
81
}
82
--
83
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

Use enum ThrottleDirection instead in the throttle test codes.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-3-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 tests/unit/test-throttle.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/unit/test-throttle.c
17
+++ b/tests/unit/test-throttle.c
18
@@ -XXX,XX +XXX,XX @@ static void test_init(void)
19
20
/* check initialized fields */
21
g_assert(tt->clock_type == QEMU_CLOCK_VIRTUAL);
22
- g_assert(tt->timers[0]);
23
- g_assert(tt->timers[1]);
24
+ g_assert(tt->timers[THROTTLE_READ]);
25
+ g_assert(tt->timers[THROTTLE_WRITE]);
26
27
/* check other fields where cleared */
28
g_assert(!ts.previous_leak);
29
@@ -XXX,XX +XXX,XX @@ static void test_destroy(void)
30
throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
31
read_timer_cb, write_timer_cb, &ts);
32
throttle_timers_destroy(tt);
33
- for (i = 0; i < 2; i++) {
34
+ for (i = 0; i < THROTTLE_MAX; i++) {
35
g_assert(!tt->timers[i]);
36
}
37
}
38
--
39
2.41.0
From: zhenwei pi <pizhenwei@bytedance.com>

Only one direction is necessary in several scenarios:
- a read-only disk
- operations on a device are considered as *write* only. For example,
  encrypt/decrypt/sign/verify operations on a cryptodev use a single
  *write* timer(read timer callback is defined, but never invoked).

Allow a single direction in throttle, this reduces memory, and uplayer
does not need a dummy callback any more.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-4-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 util/throttle.c | 42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)
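For illustration, a minimal self-contained C sketch of the single-direction
idea (simplified stand-in types, not the actual QEMU ThrottleTimers or
aio_timer_new API): a timer is only created for a direction that has a
callback, so a write-only user no longer needs a dummy read callback.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef enum { THROTTLE_READ = 0, THROTTLE_WRITE, THROTTLE_MAX } ThrottleDirection;

typedef void TimerCB(void *opaque);

typedef struct {
    TimerCB *timer_cb[THROTTLE_MAX];
    bool timer_armed[THROTTLE_MAX];   /* stand-in for QEMUTimer *timers[] */
} Timers;

/* Arm a timer only for directions that actually have a callback. */
static void timers_attach(Timers *tt)
{
    for (ThrottleDirection dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
        if (tt->timer_cb[dir]) {
            tt->timer_armed[dir] = true;
        }
    }
}

static void write_cb(void *opaque) { puts("write throttled"); }

int main(void)
{
    /* write-only throttling: no read callback, hence no read timer */
    Timers tt = { .timer_cb = { [THROTTLE_WRITE] = write_cb } };

    timers_attach(&tt);
    printf("read timer armed: %d, write timer armed: %d\n",
           tt.timer_armed[THROTTLE_READ], tt.timer_armed[THROTTLE_WRITE]);
    return 0;
}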
diff --git a/util/throttle.c b/util/throttle.c
36
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
37
--- a/qapi/qom.json
23
--- a/util/throttle.c
38
+++ b/qapi/qom.json
24
+++ b/util/throttle.c
39
@@ -XXX,XX +XXX,XX @@
25
@@ -XXX,XX +XXX,XX @@ static bool throttle_compute_timer(ThrottleState *ts,
40
# 0 means that the engine will use its default.
26
void throttle_timers_attach_aio_context(ThrottleTimers *tt,
41
# (default: 0)
27
AioContext *new_context)
42
#
28
{
43
+# @thread-pool-min: minimum number of threads reserved in the thread pool
29
- tt->timers[THROTTLE_READ] =
44
+# (default:0)
30
- aio_timer_new(new_context, tt->clock_type, SCALE_NS,
45
+#
31
- tt->timer_cb[THROTTLE_READ], tt->timer_opaque);
46
+# @thread-pool-max: maximum number of threads the thread pool can contain
32
- tt->timers[THROTTLE_WRITE] =
47
+# (default:64)
33
- aio_timer_new(new_context, tt->clock_type, SCALE_NS,
48
+#
34
- tt->timer_cb[THROTTLE_WRITE], tt->timer_opaque);
49
# Since: 7.1
35
+ ThrottleDirection dir;
50
##
51
{ 'struct': 'EventLoopBaseProperties',
52
- 'data': { '*aio-max-batch': 'int' } }
53
+ 'data': { '*aio-max-batch': 'int',
54
+ '*thread-pool-min': 'int',
55
+ '*thread-pool-max': 'int' } }
56
57
##
58
# @IothreadProperties:
59
diff --git a/include/block/aio.h b/include/block/aio.h
60
index XXXXXXX..XXXXXXX 100644
61
--- a/include/block/aio.h
62
+++ b/include/block/aio.h
63
@@ -XXX,XX +XXX,XX @@ struct AioContext {
64
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
65
QEMUBH *co_schedule_bh;
66
67
+ int thread_pool_min;
68
+ int thread_pool_max;
69
/* Thread pool for performing work and receiving completion callbacks.
70
* Has its own locking.
71
*/
72
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
73
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
74
Error **errp);
75
76
+/**
77
+ * aio_context_set_thread_pool_params:
78
+ * @ctx: the aio context
79
+ * @min: min number of threads to have readily available in the thread pool
80
+ * @min: max number of threads the thread pool can contain
81
+ */
82
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
83
+ int64_t max, Error **errp);
84
#endif
85
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
86
index XXXXXXX..XXXXXXX 100644
87
--- a/include/block/thread-pool.h
88
+++ b/include/block/thread-pool.h
89
@@ -XXX,XX +XXX,XX @@
90
91
#include "block/block.h"
92
93
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
94
+
36
+
95
typedef int ThreadPoolFunc(void *opaque);
37
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
96
38
+ if (tt->timer_cb[dir]) {
97
typedef struct ThreadPool ThreadPool;
39
+ tt->timers[dir] =
98
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
40
+ aio_timer_new(new_context, tt->clock_type, SCALE_NS,
99
int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
41
+ tt->timer_cb[dir], tt->timer_opaque);
100
ThreadPoolFunc *func, void *arg);
42
+ }
101
void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
43
+ }
102
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
103
104
#endif
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/sysemu/event-loop-base.h
108
+++ b/include/sysemu/event-loop-base.h
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBase {
110
111
/* AioContext AIO engine parameters */
112
int64_t aio_max_batch;
113
+
114
+ /* AioContext thread pool parameters */
115
+ int64_t thread_pool_min;
116
+ int64_t thread_pool_max;
117
};
118
#endif
119
diff --git a/event-loop-base.c b/event-loop-base.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/event-loop-base.c
122
+++ b/event-loop-base.c
123
@@ -XXX,XX +XXX,XX @@
124
#include "qemu/osdep.h"
125
#include "qom/object_interfaces.h"
126
#include "qapi/error.h"
127
+#include "block/thread-pool.h"
128
#include "sysemu/event-loop-base.h"
129
130
typedef struct {
131
@@ -XXX,XX +XXX,XX @@ typedef struct {
132
ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
133
} EventLoopBaseParamInfo;
134
135
+static void event_loop_base_instance_init(Object *obj)
136
+{
137
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
138
+
139
+ base->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
140
+}
141
+
142
static EventLoopBaseParamInfo aio_max_batch_info = {
143
"aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
144
};
145
+static EventLoopBaseParamInfo thread_pool_min_info = {
146
+ "thread-pool-min", offsetof(EventLoopBase, thread_pool_min),
147
+};
148
+static EventLoopBaseParamInfo thread_pool_max_info = {
149
+ "thread-pool-max", offsetof(EventLoopBase, thread_pool_max),
150
+};
151
152
static void event_loop_base_get_param(Object *obj, Visitor *v,
153
const char *name, void *opaque, Error **errp)
154
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
155
event_loop_base_get_param,
156
event_loop_base_set_param,
157
NULL, &aio_max_batch_info);
158
+ object_class_property_add(klass, "thread-pool-min", "int",
159
+ event_loop_base_get_param,
160
+ event_loop_base_set_param,
161
+ NULL, &thread_pool_min_info);
162
+ object_class_property_add(klass, "thread-pool-max", "int",
163
+ event_loop_base_get_param,
164
+ event_loop_base_set_param,
165
+ NULL, &thread_pool_max_info);
166
}
44
}
167
45
168
static const TypeInfo event_loop_base_info = {
46
/*
169
.name = TYPE_EVENT_LOOP_BASE,
47
@@ -XXX,XX +XXX,XX @@ void throttle_timers_init(ThrottleTimers *tt,
170
.parent = TYPE_OBJECT,
48
QEMUTimerCB *write_timer_cb,
171
.instance_size = sizeof(EventLoopBase),
49
void *timer_opaque)
172
+ .instance_init = event_loop_base_instance_init,
50
{
173
.class_size = sizeof(EventLoopBaseClass),
51
+ assert(read_timer_cb || write_timer_cb);
174
.class_init = event_loop_base_class_init,
52
memset(tt, 0, sizeof(ThrottleTimers));
175
.abstract = true,
53
176
diff --git a/iothread.c b/iothread.c
54
tt->clock_type = clock_type;
177
index XXXXXXX..XXXXXXX 100644
55
@@ -XXX,XX +XXX,XX @@ void throttle_timers_init(ThrottleTimers *tt,
178
--- a/iothread.c
56
/* destroy a timer */
179
+++ b/iothread.c
57
static void throttle_timer_destroy(QEMUTimer **timer)
180
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
58
{
181
aio_context_set_aio_params(iothread->ctx,
59
- assert(*timer != NULL);
182
iothread->parent_obj.aio_max_batch,
60
+ if (*timer == NULL) {
183
errp);
184
+
185
+ aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
186
+ base->thread_pool_max, errp);
187
}
188
189
190
diff --git a/util/aio-posix.c b/util/aio-posix.c
191
index XXXXXXX..XXXXXXX 100644
192
--- a/util/aio-posix.c
193
+++ b/util/aio-posix.c
194
@@ -XXX,XX +XXX,XX @@
195
196
#include "qemu/osdep.h"
197
#include "block/block.h"
198
+#include "block/thread-pool.h"
199
#include "qemu/main-loop.h"
200
#include "qemu/rcu.h"
201
#include "qemu/rcu_queue.h"
202
diff --git a/util/async.c b/util/async.c
203
index XXXXXXX..XXXXXXX 100644
204
--- a/util/async.c
205
+++ b/util/async.c
206
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
207
208
ctx->aio_max_batch = 0;
209
210
+ ctx->thread_pool_min = 0;
211
+ ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
212
+
213
return ctx;
214
fail:
215
g_source_destroy(&ctx->source);
216
@@ -XXX,XX +XXX,XX @@ void qemu_set_current_aio_context(AioContext *ctx)
217
assert(!get_my_aiocontext());
218
set_my_aiocontext(ctx);
219
}
220
+
221
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
222
+ int64_t max, Error **errp)
223
+{
224
+
225
+ if (min > max || !max || min > INT_MAX || max > INT_MAX) {
226
+ error_setg(errp, "bad thread-pool-min/thread-pool-max values");
227
+ return;
61
+ return;
228
+ }
62
+ }
63
64
timer_free(*timer);
65
*timer = NULL;
66
@@ -XXX,XX +XXX,XX @@ static void throttle_timer_destroy(QEMUTimer **timer)
67
/* Remove timers from event loop */
68
void throttle_timers_detach_aio_context(ThrottleTimers *tt)
69
{
70
- int i;
71
+ ThrottleDirection dir;
72
73
- for (i = 0; i < THROTTLE_MAX; i++) {
74
- throttle_timer_destroy(&tt->timers[i]);
75
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
76
+ throttle_timer_destroy(&tt->timers[dir]);
77
}
78
}
79
80
@@ -XXX,XX +XXX,XX @@ void throttle_timers_destroy(ThrottleTimers *tt)
81
/* is any throttling timer configured */
82
bool throttle_timers_are_initialized(ThrottleTimers *tt)
83
{
84
- if (tt->timers[0]) {
85
- return true;
86
+ ThrottleDirection dir;
229
+
87
+
230
+ ctx->thread_pool_min = min;
88
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
231
+ ctx->thread_pool_max = max;
89
+ if (tt->timers[dir]) {
90
+ return true;
91
+ }
92
}
93
94
return false;
95
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
96
{
97
int64_t now = qemu_clock_get_ns(tt->clock_type);
98
int64_t next_timestamp;
99
+ QEMUTimer *timer;
100
bool must_wait;
101
102
+ timer = is_write ? tt->timers[THROTTLE_WRITE] : tt->timers[THROTTLE_READ];
103
+ assert(timer);
232
+
104
+
233
+ if (ctx->thread_pool) {
105
must_wait = throttle_compute_timer(ts,
234
+ thread_pool_update_params(ctx->thread_pool, ctx);
106
is_write,
235
+ }
107
now,
236
+}
108
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
237
diff --git a/util/main-loop.c b/util/main-loop.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/util/main-loop.c
240
+++ b/util/main-loop.c
241
@@ -XXX,XX +XXX,XX @@
242
#include "sysemu/replay.h"
243
#include "qemu/main-loop.h"
244
#include "block/aio.h"
245
+#include "block/thread-pool.h"
246
#include "qemu/error-report.h"
247
#include "qemu/queue.h"
248
#include "qemu/compiler.h"
249
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
250
251
static void main_loop_update_params(EventLoopBase *base, Error **errp)
252
{
253
+ ERRP_GUARD();
254
+
255
if (!qemu_aio_context) {
256
error_setg(errp, "qemu aio context not ready");
257
return;
258
}
109
}
259
110
260
aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
111
/* request throttled and timer pending -> do nothing */
261
+ if (*errp) {
112
- if (timer_pending(tt->timers[is_write])) {
262
+ return;
113
+ if (timer_pending(timer)) {
263
+ }
114
return true;
264
+
115
}
265
+ aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
116
266
+ base->thread_pool_max, errp);
117
/* request throttled and timer not pending -> arm timer */
118
- timer_mod(tt->timers[is_write], next_timestamp);
119
+ timer_mod(timer, next_timestamp);
120
return true;
267
}
121
}
268
122
269
MainLoop *mloop;
270
diff --git a/util/thread-pool.c b/util/thread-pool.c
271
index XXXXXXX..XXXXXXX 100644
272
--- a/util/thread-pool.c
273
+++ b/util/thread-pool.c
274
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
275
QemuMutex lock;
276
QemuCond worker_stopped;
277
QemuSemaphore sem;
278
- int max_threads;
279
QEMUBH *new_thread_bh;
280
281
/* The following variables are only accessed from one AioContext. */
282
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
283
int new_threads; /* backlog of threads we need to create */
284
int pending_threads; /* threads created but not running yet */
285
bool stopping;
286
+ int min_threads;
287
+ int max_threads;
288
};
289
290
+static inline bool back_to_sleep(ThreadPool *pool, int ret)
291
+{
292
+ /*
293
+ * The semaphore timed out, we should exit the loop except when:
294
+ * - There is work to do, we raced with the signal.
295
+ * - The max threads threshold just changed, we raced with the signal.
296
+ * - The thread pool forces a minimum number of readily available threads.
297
+ */
298
+ if (ret == -1 && (!QTAILQ_EMPTY(&pool->request_list) ||
299
+ pool->cur_threads > pool->max_threads ||
300
+ pool->cur_threads <= pool->min_threads)) {
301
+ return true;
302
+ }
303
+
304
+ return false;
305
+}
306
+
307
static void *worker_thread(void *opaque)
308
{
309
ThreadPool *pool = opaque;
310
@@ -XXX,XX +XXX,XX @@ static void *worker_thread(void *opaque)
311
ret = qemu_sem_timedwait(&pool->sem, 10000);
312
qemu_mutex_lock(&pool->lock);
313
pool->idle_threads--;
314
- } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
315
- if (ret == -1 || pool->stopping) {
316
+ } while (back_to_sleep(pool, ret));
317
+ if (ret == -1 || pool->stopping ||
318
+ pool->cur_threads > pool->max_threads) {
319
break;
320
}
321
322
@@ -XXX,XX +XXX,XX @@ void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
323
thread_pool_submit_aio(pool, func, arg, NULL, NULL);
324
}
325
326
+void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
327
+{
328
+ qemu_mutex_lock(&pool->lock);
329
+
330
+ pool->min_threads = ctx->thread_pool_min;
331
+ pool->max_threads = ctx->thread_pool_max;
332
+
333
+ /*
334
+ * We either have to:
335
+ * - Increase the number available of threads until over the min_threads
336
+ * threshold.
337
+ * - Decrease the number of available threads until under the max_threads
338
+ * threshold.
339
+ * - Do nothing. The current number of threads fall in between the min and
340
+ * max thresholds. We'll let the pool manage itself.
341
+ */
342
+ for (int i = pool->cur_threads; i < pool->min_threads; i++) {
343
+ spawn_thread(pool);
344
+ }
345
+
346
+ for (int i = pool->cur_threads; i > pool->max_threads; i--) {
347
+ qemu_sem_post(&pool->sem);
348
+ }
349
+
350
+ qemu_mutex_unlock(&pool->lock);
351
+}
352
+
353
static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
354
{
355
if (!ctx) {
356
@@ -XXX,XX +XXX,XX @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
357
qemu_mutex_init(&pool->lock);
358
qemu_cond_init(&pool->worker_stopped);
359
qemu_sem_init(&pool->sem, 0);
360
- pool->max_threads = 64;
361
pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
362
363
QLIST_INIT(&pool->head);
364
QTAILQ_INIT(&pool->request_list);
365
+
366
+ thread_pool_update_params(pool, ctx);
367
}
368
369
ThreadPool *thread_pool_new(AioContext *ctx)
370
--
2.41.0
From: zhenwei pi <pizhenwei@bytedance.com>

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-5-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 tests/unit/test-throttle.c | 66 ++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
10
mark 'MainLoop' as non-deletable.
11
12
[1] For example:
13
-object main-loop,id=main-loop,aio-max-batch=<value>
14
15
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
16
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
17
Acked-by: Markus Armbruster <armbru@redhat.com>
18
Message-id: 20220425075723.20019-3-nsaenzju@redhat.com
19
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
20
---
21
qapi/qom.json | 13 ++++++++
22
meson.build | 3 +-
23
include/qemu/main-loop.h | 10 ++++++
24
include/sysemu/event-loop-base.h | 1 +
25
event-loop-base.c | 13 ++++++++
26
util/main-loop.c | 56 ++++++++++++++++++++++++++++++++
27
6 files changed, 95 insertions(+), 1 deletion(-)
28
29
diff --git a/qapi/qom.json b/qapi/qom.json
30
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
31
--- a/qapi/qom.json
14
--- a/tests/unit/test-throttle.c
32
+++ b/qapi/qom.json
15
+++ b/tests/unit/test-throttle.c
33
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static void test_init(void)
34
'*poll-grow': 'int',
17
throttle_timers_destroy(tt);
35
'*poll-shrink': 'int' } }
18
}
36
19
37
+##
20
+static void test_init_readonly(void)
38
+# @MainLoopProperties:
21
+{
39
+#
22
+ int i;
40
+# Properties for the main-loop object.
41
+#
42
+# Since: 7.1
43
+##
44
+{ 'struct': 'MainLoopProperties',
45
+ 'base': 'EventLoopBaseProperties',
46
+ 'data': {} }
47
+
23
+
48
##
24
+ tt = &tgm.throttle_timers;
49
# @MemoryBackendProperties:
50
#
51
@@ -XXX,XX +XXX,XX @@
52
{ 'name': 'input-linux',
53
'if': 'CONFIG_LINUX' },
54
'iothread',
55
+ 'main-loop',
56
{ 'name': 'memory-backend-epc',
57
'if': 'CONFIG_LINUX' },
58
'memory-backend-file',
59
@@ -XXX,XX +XXX,XX @@
60
'input-linux': { 'type': 'InputLinuxProperties',
61
'if': 'CONFIG_LINUX' },
62
'iothread': 'IothreadProperties',
63
+ 'main-loop': 'MainLoopProperties',
64
'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties',
65
'if': 'CONFIG_LINUX' },
66
'memory-backend-file': 'MemoryBackendFileProperties',
67
diff --git a/meson.build b/meson.build
68
index XXXXXXX..XXXXXXX 100644
69
--- a/meson.build
70
+++ b/meson.build
71
@@ -XXX,XX +XXX,XX @@ libqemuutil = static_library('qemuutil',
72
sources: util_ss.sources() + stub_ss.sources() + genh,
73
dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, pixman])
74
qemuutil = declare_dependency(link_with: libqemuutil,
75
- sources: genh + version_res)
76
+ sources: genh + version_res,
77
+ dependencies: [event_loop_base])
78
79
if have_system or have_user
80
decodetree = generator(find_program('scripts/decodetree.py'),
81
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
82
index XXXXXXX..XXXXXXX 100644
83
--- a/include/qemu/main-loop.h
84
+++ b/include/qemu/main-loop.h
85
@@ -XXX,XX +XXX,XX @@
86
#define QEMU_MAIN_LOOP_H
87
88
#include "block/aio.h"
89
+#include "qom/object.h"
90
+#include "sysemu/event-loop-base.h"
91
92
#define SIG_IPI SIGUSR1
93
94
+#define TYPE_MAIN_LOOP "main-loop"
95
+OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP)
96
+
25
+
97
+struct MainLoop {
26
+ /* fill the structures with crap */
98
+ EventLoopBase parent_obj;
27
+ memset(&ts, 1, sizeof(ts));
99
+};
28
+ memset(tt, 1, sizeof(*tt));
100
+typedef struct MainLoop MainLoop;
101
+
29
+
102
/**
30
+ /* init structures */
103
* qemu_init_main_loop: Set up the process so that it can run the main loop.
31
+ throttle_init(&ts);
104
*
32
+ throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
33
+ read_timer_cb, NULL, &ts);
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/sysemu/event-loop-base.h
108
+++ b/include/sysemu/event-loop-base.h
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBaseClass {
110
111
void (*init)(EventLoopBase *base, Error **errp);
112
void (*update_params)(EventLoopBase *base, Error **errp);
113
+ bool (*can_be_deleted)(EventLoopBase *base);
114
};
115
116
struct EventLoopBase {
117
diff --git a/event-loop-base.c b/event-loop-base.c
118
index XXXXXXX..XXXXXXX 100644
119
--- a/event-loop-base.c
120
+++ b/event-loop-base.c
121
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_complete(UserCreatable *uc, Error **errp)
122
}
123
}
124
125
+static bool event_loop_base_can_be_deleted(UserCreatable *uc)
126
+{
127
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
128
+ EventLoopBase *backend = EVENT_LOOP_BASE(uc);
129
+
34
+
130
+ if (bc->can_be_deleted) {
35
+ /* check initialized fields */
131
+ return bc->can_be_deleted(backend);
36
+ g_assert(tt->clock_type == QEMU_CLOCK_VIRTUAL);
37
+ g_assert(tt->timers[THROTTLE_READ]);
38
+ g_assert(!tt->timers[THROTTLE_WRITE]);
39
+
40
+ /* check other fields where cleared */
41
+ g_assert(!ts.previous_leak);
42
+ g_assert(!ts.cfg.op_size);
43
+ for (i = 0; i < BUCKETS_COUNT; i++) {
44
+ g_assert(!ts.cfg.buckets[i].avg);
45
+ g_assert(!ts.cfg.buckets[i].max);
46
+ g_assert(!ts.cfg.buckets[i].level);
132
+ }
47
+ }
133
+
48
+
134
+ return true;
49
+ throttle_timers_destroy(tt);
135
+}
50
+}
136
+
51
+
137
static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
52
+static void test_init_writeonly(void)
138
{
139
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
140
ucc->complete = event_loop_base_complete;
141
+ ucc->can_be_deleted = event_loop_base_can_be_deleted;
142
143
object_class_property_add(klass, "aio-max-batch", "int",
144
event_loop_base_get_param,
145
diff --git a/util/main-loop.c b/util/main-loop.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/util/main-loop.c
148
+++ b/util/main-loop.c
149
@@ -XXX,XX +XXX,XX @@
150
#include "qemu/error-report.h"
151
#include "qemu/queue.h"
152
#include "qemu/compiler.h"
153
+#include "qom/object.h"
154
155
#ifndef _WIN32
156
#include <sys/wait.h>
157
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
158
return 0;
159
}
160
161
+static void main_loop_update_params(EventLoopBase *base, Error **errp)
162
+{
53
+{
163
+ if (!qemu_aio_context) {
54
+ int i;
164
+ error_setg(errp, "qemu aio context not ready");
55
+
165
+ return;
56
+ tt = &tgm.throttle_timers;
57
+
58
+ /* fill the structures with crap */
59
+ memset(&ts, 1, sizeof(ts));
60
+ memset(tt, 1, sizeof(*tt));
61
+
62
+ /* init structures */
63
+ throttle_init(&ts);
64
+ throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
65
+ NULL, write_timer_cb, &ts);
66
+
67
+ /* check initialized fields */
68
+ g_assert(tt->clock_type == QEMU_CLOCK_VIRTUAL);
69
+ g_assert(!tt->timers[THROTTLE_READ]);
70
+ g_assert(tt->timers[THROTTLE_WRITE]);
71
+
72
+ /* check other fields where cleared */
73
+ g_assert(!ts.previous_leak);
74
+ g_assert(!ts.cfg.op_size);
75
+ for (i = 0; i < BUCKETS_COUNT; i++) {
76
+ g_assert(!ts.cfg.buckets[i].avg);
77
+ g_assert(!ts.cfg.buckets[i].max);
78
+ g_assert(!ts.cfg.buckets[i].level);
166
+ }
79
+ }
167
+
80
+
168
+ aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
81
+ throttle_timers_destroy(tt);
169
+}
82
+}
170
+
83
+
171
+MainLoop *mloop;
84
static void test_destroy(void)
172
+
85
{
173
+static void main_loop_init(EventLoopBase *base, Error **errp)
86
int i;
174
+{
87
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
175
+ MainLoop *m = MAIN_LOOP(base);
88
g_test_add_func("/throttle/leak_bucket", test_leak_bucket);
176
+
89
g_test_add_func("/throttle/compute_wait", test_compute_wait);
177
+ if (mloop) {
90
g_test_add_func("/throttle/init", test_init);
178
+ error_setg(errp, "only one main-loop instance allowed");
91
+ g_test_add_func("/throttle/init_readonly", test_init_readonly);
179
+ return;
92
+ g_test_add_func("/throttle/init_writeonly", test_init_writeonly);
180
+ }
93
g_test_add_func("/throttle/destroy", test_destroy);
181
+
94
g_test_add_func("/throttle/have_timer", test_have_timer);
182
+ main_loop_update_params(base, errp);
95
g_test_add_func("/throttle/detach_attach", test_detach_attach);
183
+
184
+ mloop = m;
185
+ return;
186
+}
187
+
188
+static bool main_loop_can_be_deleted(EventLoopBase *base)
189
+{
190
+ return false;
191
+}
192
+
193
+static void main_loop_class_init(ObjectClass *oc, void *class_data)
194
+{
195
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc);
196
+
197
+ bc->init = main_loop_init;
198
+ bc->update_params = main_loop_update_params;
199
+ bc->can_be_deleted = main_loop_can_be_deleted;
200
+}
201
+
202
+static const TypeInfo main_loop_info = {
203
+ .name = TYPE_MAIN_LOOP,
204
+ .parent = TYPE_EVENT_LOOP_BASE,
205
+ .class_init = main_loop_class_init,
206
+ .instance_size = sizeof(MainLoop),
207
+};
208
+
209
+static void main_loop_register_types(void)
210
+{
211
+ type_register_static(&main_loop_info);
212
+}
213
+
214
+type_init(main_loop_register_types)
215
+
216
static int max_priority;
217
218
#ifndef _WIN32
219
--
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

Operations on a cryptodev are considered as *write* only, the callback
of read direction is never invoked. Use NULL instead of an unreachable
path(cryptodev_backend_throttle_timer_cb on read direction).

The dummy read timer(never invoked) is already removed here, it means
that the 'FIXME' tag is no longer needed.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-6-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 backends/cryptodev.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/backends/cryptodev.c
22
+++ b/backends/cryptodev.c
23
@@ -XXX,XX +XXX,XX @@ static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field,
24
if (!enabled) {
25
throttle_init(&backend->ts);
26
throttle_timers_init(&backend->tt, qemu_get_aio_context(),
27
- QEMU_CLOCK_REALTIME,
28
- cryptodev_backend_throttle_timer_cb, /* FIXME */
29
+ QEMU_CLOCK_REALTIME, NULL,
30
cryptodev_backend_throttle_timer_cb, backend);
31
}
32
33
--
34
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

enum ThrottleDirection is already there, use ThrottleDirection instead
of 'bool is_write' for throttle API, also modify related codes from
block, fsdev, cryptodev and tests.

Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-7-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 include/qemu/throttle.h     |  5 +++--
 backends/cryptodev.c        |  9 +++++----
 block/throttle-groups.c     |  6 ++++--
 fsdev/qemu-fsdev-throttle.c |  8 +++++---
 tests/unit/test-throttle.c  |  4 ++--
 util/throttle.c             | 31 +++++++++++++++++--------------
 6 files changed, 36 insertions(+), 27 deletions(-)
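For illustration, a minimal self-contained C sketch of converting a
'bool is_write' caller at an API boundary (simplified stand-in functions,
not the actual QEMU throttle_account()/throttle_schedule_timer() API): the
new-style function takes a ThrottleDirection and validates it, while a
legacy caller maps its bool exactly once.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { THROTTLE_READ = 0, THROTTLE_WRITE, THROTTLE_MAX } ThrottleDirection;

/* New-style API: takes a direction and checks it up front. */
static void account(ThrottleDirection direction, uint64_t size)
{
    assert(direction < THROTTLE_MAX);
    printf("%s: %llu bytes\n",
           direction == THROTTLE_WRITE ? "write" : "read",
           (unsigned long long)size);
}

/* Old-style caller: the bool is translated once, at the boundary. */
static void legacy_io(bool is_write, uint64_t size)
{
    ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
    account(direction, size);
}

int main(void)
{
    legacy_io(false, 4096);
    legacy_io(true, 8192);
    return 0;
}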
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/qemu/throttle.h
23
+++ b/include/qemu/throttle.h
24
@@ -XXX,XX +XXX,XX @@ void throttle_config_init(ThrottleConfig *cfg);
25
/* usage */
26
bool throttle_schedule_timer(ThrottleState *ts,
27
ThrottleTimers *tt,
28
- bool is_write);
29
+ ThrottleDirection direction);
30
31
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size);
32
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
33
+ uint64_t size);
34
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
35
Error **errp);
36
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var);
37
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/backends/cryptodev.c
40
+++ b/backends/cryptodev.c
41
@@ -XXX,XX +XXX,XX @@ static void cryptodev_backend_throttle_timer_cb(void *opaque)
42
continue;
43
}
44
45
- throttle_account(&backend->ts, true, ret);
46
+ throttle_account(&backend->ts, THROTTLE_WRITE, ret);
47
cryptodev_backend_operation(backend, op_info);
48
if (throttle_enabled(&backend->tc) &&
49
- throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
50
+ throttle_schedule_timer(&backend->ts, &backend->tt,
51
+ THROTTLE_WRITE)) {
52
break;
53
}
54
}
55
@@ -XXX,XX +XXX,XX @@ int cryptodev_backend_crypto_operation(
56
goto do_account;
57
}
58
59
- if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
60
+ if (throttle_schedule_timer(&backend->ts, &backend->tt, THROTTLE_WRITE) ||
61
!QTAILQ_EMPTY(&backend->opinfos)) {
62
QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
63
return 0;
64
@@ -XXX,XX +XXX,XX @@ do_account:
65
return ret;
66
}
67
68
- throttle_account(&backend->ts, true, ret);
69
+ throttle_account(&backend->ts, THROTTLE_WRITE, ret);
70
71
return cryptodev_backend_operation(backend, op_info);
72
}
73
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/block/throttle-groups.c
76
+++ b/block/throttle-groups.c
77
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
78
ThrottleState *ts = tgm->throttle_state;
79
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
80
ThrottleTimers *tt = &tgm->throttle_timers;
81
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
82
bool must_wait;
83
84
if (qatomic_read(&tgm->io_limits_disabled)) {
85
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
86
return true;
87
}
88
89
- must_wait = throttle_schedule_timer(ts, tt, is_write);
90
+ must_wait = throttle_schedule_timer(ts, tt, direction);
91
92
/* If a timer just got armed, set tgm as the current token */
93
if (must_wait) {
94
@@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm
95
bool must_wait;
96
ThrottleGroupMember *token;
97
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
98
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
99
100
assert(bytes >= 0);
101
102
@@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm
103
}
104
105
/* The I/O will be executed, so do the accounting */
106
- throttle_account(tgm->throttle_state, is_write, bytes);
107
+ throttle_account(tgm->throttle_state, direction, bytes);
108
109
/* Schedule the next request */
110
schedule_next_request(tgm, is_write);
111
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/fsdev/qemu-fsdev-throttle.c
114
+++ b/fsdev/qemu-fsdev-throttle.c
115
@@ -XXX,XX +XXX,XX @@ void fsdev_throttle_init(FsThrottle *fst)
116
void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
117
struct iovec *iov, int iovcnt)
118
{
119
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
120
+
121
if (throttle_enabled(&fst->cfg)) {
122
- if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
123
+ if (throttle_schedule_timer(&fst->ts, &fst->tt, direction) ||
124
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
125
qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
126
}
127
128
- throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
129
+ throttle_account(&fst->ts, direction, iov_size(iov, iovcnt));
130
131
if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
132
- !throttle_schedule_timer(&fst->ts, &fst->tt, is_write)) {
133
+ !throttle_schedule_timer(&fst->ts, &fst->tt, direction)) {
134
qemu_co_queue_next(&fst->throttled_reqs[is_write]);
135
}
136
}
137
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
138
index XXXXXXX..XXXXXXX 100644
139
--- a/tests/unit/test-throttle.c
140
+++ b/tests/unit/test-throttle.c
141
@@ -XXX,XX +XXX,XX @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
142
throttle_config(&ts, QEMU_CLOCK_VIRTUAL, &cfg);
143
144
/* account a read */
145
- throttle_account(&ts, false, size);
146
+ throttle_account(&ts, THROTTLE_READ, size);
147
/* account a write */
148
- throttle_account(&ts, true, size);
149
+ throttle_account(&ts, THROTTLE_WRITE, size);
150
151
/* check total result */
152
index = to_test[is_ops][0];
153
diff --git a/util/throttle.c b/util/throttle.c
154
index XXXXXXX..XXXXXXX 100644
155
--- a/util/throttle.c
156
+++ b/util/throttle.c
157
@@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
158
159
/* This function compute the time that must be waited while this IO
160
*
161
- * @is_write: true if the current IO is a write, false if it's a read
162
+ * @direction: throttle direction
163
* @ret: time to wait
164
*/
165
static int64_t throttle_compute_wait_for(ThrottleState *ts,
166
- bool is_write)
167
+ ThrottleDirection direction)
168
{
169
BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
170
THROTTLE_OPS_TOTAL,
171
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
172
int i;
173
174
for (i = 0; i < 4; i++) {
175
- BucketType index = to_check[is_write][i];
176
+ BucketType index = to_check[direction][i];
177
wait = throttle_compute_wait(&ts->cfg.buckets[index]);
178
if (wait > max_wait) {
179
max_wait = wait;
180
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
181
182
/* compute the timer for this type of operation
183
*
184
- * @is_write: the type of operation
185
+ * @direction: throttle direction
186
* @now: the current clock timestamp
187
* @next_timestamp: the resulting timer
188
* @ret: true if a timer must be set
189
*/
190
static bool throttle_compute_timer(ThrottleState *ts,
191
- bool is_write,
192
+ ThrottleDirection direction,
193
int64_t now,
194
int64_t *next_timestamp)
195
{
196
@@ -XXX,XX +XXX,XX @@ static bool throttle_compute_timer(ThrottleState *ts,
197
throttle_do_leak(ts, now);
198
199
/* compute the wait time if any */
200
- wait = throttle_compute_wait_for(ts, is_write);
201
+ wait = throttle_compute_wait_for(ts, direction);
202
203
/* if the code must wait compute when the next timer should fire */
204
if (wait) {
205
@@ -XXX,XX +XXX,XX @@ void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
206
* NOTE: this function is not unit tested due to it's usage of timer_mod
207
*
208
* @tt: the timers structure
209
- * @is_write: the type of operation (read/write)
210
+ * @direction: throttle direction
211
* @ret: true if the timer has been scheduled else false
212
*/
213
bool throttle_schedule_timer(ThrottleState *ts,
214
ThrottleTimers *tt,
215
- bool is_write)
216
+ ThrottleDirection direction)
217
{
218
int64_t now = qemu_clock_get_ns(tt->clock_type);
219
int64_t next_timestamp;
220
QEMUTimer *timer;
221
bool must_wait;
222
223
- timer = is_write ? tt->timers[THROTTLE_WRITE] : tt->timers[THROTTLE_READ];
224
+ assert(direction < THROTTLE_MAX);
225
+ timer = tt->timers[direction];
226
assert(timer);
227
228
must_wait = throttle_compute_timer(ts,
229
- is_write,
230
+ direction,
231
now,
232
&next_timestamp);
233
234
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
235
236
/* do the accounting for this operation
237
*
238
- * @is_write: the type of operation (read/write)
239
+ * @direction: throttle direction
240
* @size: the size of the operation
241
*/
242
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
243
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
244
+ uint64_t size)
245
{
246
const BucketType bucket_types_size[2][2] = {
247
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
248
@@ -XXX,XX +XXX,XX @@ void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
249
double units = 1.0;
250
unsigned i;
251
252
+ assert(direction < THROTTLE_MAX);
253
/* if cfg.op_size is defined and smaller than size we compute unit count */
254
if (ts->cfg.op_size && size > ts->cfg.op_size) {
255
units = (double) size / ts->cfg.op_size;
256
@@ -XXX,XX +XXX,XX @@ void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
257
for (i = 0; i < 2; i++) {
258
LeakyBucket *bkt;
259
260
- bkt = &ts->cfg.buckets[bucket_types_size[is_write][i]];
261
+ bkt = &ts->cfg.buckets[bucket_types_size[direction][i]];
262
bkt->level += size;
263
if (bkt->burst_length > 1) {
264
bkt->burst_level += size;
265
}
266
267
- bkt = &ts->cfg.buckets[bucket_types_units[is_write][i]];
268
+ bkt = &ts->cfg.buckets[bucket_types_units[direction][i]];
269
bkt->level += units;
270
if (bkt->burst_length > 1) {
271
bkt->burst_level += units;
272
--
273
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

The first dimension of both to_check and
bucket_types_size/bucket_types_units is used as throttle direction,
use THROTTLE_MAX instead of hard coded number. Also use ARRAY_SIZE()
to avoid hard coded number for the second dimension.

Hanna noticed that the two array should be static. Yes, turn them
into static variables.

Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-8-pizhenwei@bytedance.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 util/throttle.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
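For illustration, a minimal self-contained C sketch of the table pattern
(generic bucket numbers rather than the actual QEMU BucketType values, and a
local ARRAY_SIZE macro standing in for QEMU's): the first dimension is sized
by THROTTLE_MAX and the inner loop bound comes from ARRAY_SIZE(), so neither
dimension is hard-coded.

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef enum { THROTTLE_READ = 0, THROTTLE_WRITE, THROTTLE_MAX } ThrottleDirection;

/* first dimension indexed by direction, sized by the enum */
static const int to_check[THROTTLE_MAX][2] = {
    [THROTTLE_READ]  = { 0, 1 },
    [THROTTLE_WRITE] = { 2, 3 },
};

int main(void)
{
    for (ThrottleDirection dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
        for (size_t i = 0; i < ARRAY_SIZE(to_check[dir]); i++) {
            printf("direction %d checks bucket %d\n", (int)dir, to_check[dir][i]);
        }
    }
    return 0;
}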
diff --git a/util/throttle.c b/util/throttle.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/util/throttle.c
22
+++ b/util/throttle.c
23
@@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
24
static int64_t throttle_compute_wait_for(ThrottleState *ts,
25
ThrottleDirection direction)
26
{
27
- BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
28
+ static const BucketType to_check[THROTTLE_MAX][4] = {
29
+ {THROTTLE_BPS_TOTAL,
30
THROTTLE_OPS_TOTAL,
31
THROTTLE_BPS_READ,
32
THROTTLE_OPS_READ},
33
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
34
int64_t wait, max_wait = 0;
35
int i;
36
37
- for (i = 0; i < 4; i++) {
38
+ for (i = 0; i < ARRAY_SIZE(to_check[THROTTLE_READ]); i++) {
39
BucketType index = to_check[direction][i];
40
wait = throttle_compute_wait(&ts->cfg.buckets[index]);
41
if (wait > max_wait) {
42
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
43
void throttle_account(ThrottleState *ts, ThrottleDirection direction,
44
uint64_t size)
45
{
46
- const BucketType bucket_types_size[2][2] = {
47
+ static const BucketType bucket_types_size[THROTTLE_MAX][2] = {
48
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
49
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_WRITE }
50
};
51
- const BucketType bucket_types_units[2][2] = {
52
+ static const BucketType bucket_types_units[THROTTLE_MAX][2] = {
53
{ THROTTLE_OPS_TOTAL, THROTTLE_OPS_READ },
54
{ THROTTLE_OPS_TOTAL, THROTTLE_OPS_WRITE }
55
};
56
@@ -XXX,XX +XXX,XX @@ void throttle_account(ThrottleState *ts, ThrottleDirection direction,
57
units = (double) size / ts->cfg.op_size;
58
}
59
60
- for (i = 0; i < 2; i++) {
61
+ for (i = 0; i < ARRAY_SIZE(bucket_types_size[THROTTLE_READ]); i++) {
62
LeakyBucket *bkt;
63
64
bkt = &ts->cfg.buckets[bucket_types_size[direction][i]];
65
--
66
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

'bool is_write' style is obsolete from throttle framework, adapt
fsdev to the new style.

Cc: Greg Kurz <groug@kaod.org>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-9-pizhenwei@bytedance.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 fsdev/qemu-fsdev-throttle.h |  4 ++--
 fsdev/qemu-fsdev-throttle.c | 14 +++++++-------
 hw/9pfs/cofile.c            |  4 ++--
 3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/fsdev/qemu-fsdev-throttle.h b/fsdev/qemu-fsdev-throttle.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/fsdev/qemu-fsdev-throttle.h
21
+++ b/fsdev/qemu-fsdev-throttle.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct FsThrottle {
23
ThrottleState ts;
24
ThrottleTimers tt;
25
ThrottleConfig cfg;
26
- CoQueue throttled_reqs[2];
27
+ CoQueue throttled_reqs[THROTTLE_MAX];
28
} FsThrottle;
29
30
int fsdev_throttle_parse_opts(QemuOpts *, FsThrottle *, Error **);
31
32
void fsdev_throttle_init(FsThrottle *);
33
34
-void coroutine_fn fsdev_co_throttle_request(FsThrottle *, bool ,
35
+void coroutine_fn fsdev_co_throttle_request(FsThrottle *, ThrottleDirection ,
36
struct iovec *, int);
37
38
void fsdev_throttle_cleanup(FsThrottle *);
39
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/fsdev/qemu-fsdev-throttle.c
42
+++ b/fsdev/qemu-fsdev-throttle.c
43
@@ -XXX,XX +XXX,XX @@ void fsdev_throttle_init(FsThrottle *fst)
44
}
45
}
46
47
-void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
48
+void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst,
49
+ ThrottleDirection direction,
50
struct iovec *iov, int iovcnt)
51
{
52
- ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
53
-
54
+ assert(direction < THROTTLE_MAX);
55
if (throttle_enabled(&fst->cfg)) {
56
if (throttle_schedule_timer(&fst->ts, &fst->tt, direction) ||
57
- !qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
58
- qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
59
+ !qemu_co_queue_empty(&fst->throttled_reqs[direction])) {
60
+ qemu_co_queue_wait(&fst->throttled_reqs[direction], NULL);
61
}
62
63
throttle_account(&fst->ts, direction, iov_size(iov, iovcnt));
64
65
- if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
66
+ if (!qemu_co_queue_empty(&fst->throttled_reqs[direction]) &&
67
!throttle_schedule_timer(&fst->ts, &fst->tt, direction)) {
68
- qemu_co_queue_next(&fst->throttled_reqs[is_write]);
69
+ qemu_co_queue_next(&fst->throttled_reqs[direction]);
70
}
71
}
72
}
73
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/hw/9pfs/cofile.c
76
+++ b/hw/9pfs/cofile.c
77
@@ -XXX,XX +XXX,XX @@ int coroutine_fn v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
78
if (v9fs_request_cancelled(pdu)) {
79
return -EINTR;
80
}
81
- fsdev_co_throttle_request(s->ctx.fst, true, iov, iovcnt);
82
+ fsdev_co_throttle_request(s->ctx.fst, THROTTLE_WRITE, iov, iovcnt);
83
v9fs_co_run_in_worker(
84
{
85
err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);
86
@@ -XXX,XX +XXX,XX @@ int coroutine_fn v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
87
if (v9fs_request_cancelled(pdu)) {
88
return -EINTR;
89
}
90
- fsdev_co_throttle_request(s->ctx.fst, false, iov, iovcnt);
91
+ fsdev_co_throttle_request(s->ctx.fst, THROTTLE_READ, iov, iovcnt);
92
v9fs_co_run_in_worker(
93
{
94
err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);
95
--
96
2.41.0
New patch
From: zhenwei pi <pizhenwei@bytedance.com>

'bool is_write' style is obsolete from throttle framework, adapt
block throttle groups to the new style:
- use ThrottleDirection instead of 'bool is_write'. Ex,
  schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
  -> schedule_next_request(ThrottleGroupMember *tgm, ThrottleDirection direction)

- use THROTTLE_MAX instead of hard code. Ex, ThrottleGroupMember *tokens[2]
  -> ThrottleGroupMember *tokens[THROTTLE_MAX]

- use ThrottleDirection instead of hard code on iteration. Ex, (i = 0; i < 2; i++)
  -> for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++)

Use a simple python script to test the new style:
#!/usr/bin/python3
import subprocess
import random
import time

commands = ['virsh blkdeviotune jammy vda --write-bytes-sec ', \
            'virsh blkdeviotune jammy vda --write-iops-sec ', \
            'virsh blkdeviotune jammy vda --read-bytes-sec ', \
            'virsh blkdeviotune jammy vda --read-iops-sec ']

for loop in range(1, 1000):
    time.sleep(random.randrange(3, 5))
    command = commands[random.randrange(0, 3)] + str(random.randrange(0, 1000000))
    subprocess.run(command, shell=True, check=True)

This works fine.

Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
Message-Id: <20230728022006.1098509-10-pizhenwei@bytedance.com>
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 include/block/throttle-groups.h |   6 +-
 block/block-backend.c           |   4 +-
 block/throttle-groups.c         | 161 ++++++++++++++++----------------
 block/throttle.c                |   8 +-
 4 files changed, 90 insertions(+), 89 deletions(-)
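For illustration, a minimal self-contained C sketch of keeping one round-robin
token per direction (plain arrays standing in for the actual ThrottleGroup,
ThrottleGroupMember and their lists): read and write requests rotate through
the group's members independently, each tracked by tokens[direction].

#include <stdio.h>

typedef enum { THROTTLE_READ = 0, THROTTLE_WRITE, THROTTLE_MAX } ThrottleDirection;

#define N_MEMBERS 3

typedef struct {
    unsigned pending_reqs[THROTTLE_MAX];
} Member;

typedef struct {
    Member members[N_MEMBERS];
    int tokens[THROTTLE_MAX];   /* was tokens[2]: next member per direction */
} Group;

/* Advance the per-direction token to the next member with pending requests. */
static int next_member(Group *g, ThrottleDirection dir)
{
    int start = g->tokens[dir];

    for (int i = 1; i <= N_MEMBERS; i++) {
        int candidate = (start + i) % N_MEMBERS;
        if (g->members[candidate].pending_reqs[dir]) {
            g->tokens[dir] = candidate;
            return candidate;
        }
    }
    return start;   /* nobody else is waiting in this direction */
}

int main(void)
{
    Group g = {0};

    g.members[2].pending_reqs[THROTTLE_READ] = 1;
    g.members[1].pending_reqs[THROTTLE_WRITE] = 1;
    printf("next reader: member %d\n", next_member(&g, THROTTLE_READ));
    printf("next writer: member %d\n", next_member(&g, THROTTLE_WRITE));
    return 0;
}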
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/include/block/throttle-groups.h
47
+++ b/include/block/throttle-groups.h
48
@@ -XXX,XX +XXX,XX @@ typedef struct ThrottleGroupMember {
49
AioContext *aio_context;
50
/* throttled_reqs_lock protects the CoQueues for throttled requests. */
51
CoMutex throttled_reqs_lock;
52
- CoQueue throttled_reqs[2];
53
+ CoQueue throttled_reqs[THROTTLE_MAX];
54
55
/* Nonzero if the I/O limits are currently being ignored; generally
56
* it is zero. Accessed with atomic operations.
57
@@ -XXX,XX +XXX,XX @@ typedef struct ThrottleGroupMember {
58
* throttle_state tells us if I/O limits are configured. */
59
ThrottleState *throttle_state;
60
ThrottleTimers throttle_timers;
61
- unsigned pending_reqs[2];
62
+ unsigned pending_reqs[THROTTLE_MAX];
63
QLIST_ENTRY(ThrottleGroupMember) round_robin;
64
65
} ThrottleGroupMember;
66
@@ -XXX,XX +XXX,XX @@ void throttle_group_restart_tgm(ThrottleGroupMember *tgm);
67
68
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
69
int64_t bytes,
70
- bool is_write);
71
+ ThrottleDirection direction);
72
void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
73
AioContext *new_context);
74
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm);
75
diff --git a/block/block-backend.c b/block/block-backend.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/block/block-backend.c
78
+++ b/block/block-backend.c
79
@@ -XXX,XX +XXX,XX @@ blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
80
/* throttling disk I/O */
81
if (blk->public.throttle_group_member.throttle_state) {
82
throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
83
- bytes, false);
84
+ bytes, THROTTLE_READ);
85
}
86
87
ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset,
88
@@ -XXX,XX +XXX,XX @@ blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
89
/* throttling disk I/O */
90
if (blk->public.throttle_group_member.throttle_state) {
91
throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
92
- bytes, true);
93
+ bytes, THROTTLE_WRITE);
94
}
95
96
if (!blk->enable_write_cache) {
97
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/block/throttle-groups.c
100
+++ b/block/throttle-groups.c
101
@@ -XXX,XX +XXX,XX @@
102
103
static void throttle_group_obj_init(Object *obj);
104
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
105
-static void timer_cb(ThrottleGroupMember *tgm, bool is_write);
106
+static void timer_cb(ThrottleGroupMember *tgm, ThrottleDirection direction);
107
108
/* The ThrottleGroup structure (with its ThrottleState) is shared
109
* among different ThrottleGroupMembers and it's independent from
110
@@ -XXX,XX +XXX,XX @@ struct ThrottleGroup {
111
QemuMutex lock; /* This lock protects the following four fields */
112
ThrottleState ts;
113
QLIST_HEAD(, ThrottleGroupMember) head;
114
- ThrottleGroupMember *tokens[2];
115
- bool any_timer_armed[2];
116
+ ThrottleGroupMember *tokens[THROTTLE_MAX];
117
+ bool any_timer_armed[THROTTLE_MAX];
118
QEMUClockType clock_type;
119
120
/* This field is protected by the global QEMU mutex */
121
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
122
* This assumes that tg->lock is held.
123
*
124
* @tgm: the ThrottleGroupMember
125
- * @is_write: the type of operation (read/write)
126
+ * @direction: the ThrottleDirection
127
* @ret: whether the ThrottleGroupMember has pending requests.
128
*/
129
static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
130
- bool is_write)
131
+ ThrottleDirection direction)
132
{
133
- return tgm->pending_reqs[is_write];
134
+ return tgm->pending_reqs[direction];
135
}
136
137
/* Return the next ThrottleGroupMember in the round-robin sequence with pending
138
@@ -XXX,XX +XXX,XX @@ static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
139
* This assumes that tg->lock is held.
140
*
141
* @tgm: the current ThrottleGroupMember
142
- * @is_write: the type of operation (read/write)
143
+ * @direction: the ThrottleDirection
144
* @ret: the next ThrottleGroupMember with pending requests, or tgm if
145
* there is none.
146
*/
147
static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
148
- bool is_write)
149
+ ThrottleDirection direction)
150
{
151
ThrottleState *ts = tgm->throttle_state;
152
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
153
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
154
* it's being drained. Skip the round-robin search and return tgm
155
* immediately if it has pending requests. Otherwise we could be
156
* forcing it to wait for other member's throttled requests. */
157
- if (tgm_has_pending_reqs(tgm, is_write) &&
158
+ if (tgm_has_pending_reqs(tgm, direction) &&
159
qatomic_read(&tgm->io_limits_disabled)) {
160
return tgm;
161
}
162
163
- start = token = tg->tokens[is_write];
164
+ start = token = tg->tokens[direction];
165
166
/* get next bs round in round robin style */
167
token = throttle_group_next_tgm(token);
168
- while (token != start && !tgm_has_pending_reqs(token, is_write)) {
169
+ while (token != start && !tgm_has_pending_reqs(token, direction)) {
170
token = throttle_group_next_tgm(token);
171
}
172
173
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
174
* then decide the token is the current tgm because chances are
175
* the current tgm got the current request queued.
176
*/
177
- if (token == start && !tgm_has_pending_reqs(token, is_write)) {
178
+ if (token == start && !tgm_has_pending_reqs(token, direction)) {
179
token = tgm;
180
}
181
182
/* Either we return the original TGM, or one with pending requests */
183
- assert(token == tgm || tgm_has_pending_reqs(token, is_write));
184
+ assert(token == tgm || tgm_has_pending_reqs(token, direction));
185
186
return token;
187
}
188
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
189
* This assumes that tg->lock is held.
190
*
191
* @tgm: the current ThrottleGroupMember
192
- * @is_write: the type of operation (read/write)
193
+ * @direction: the ThrottleDirection
194
* @ret: whether the I/O request needs to be throttled or not
195
*/
196
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
197
- bool is_write)
198
+ ThrottleDirection direction)
199
{
200
ThrottleState *ts = tgm->throttle_state;
201
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
202
ThrottleTimers *tt = &tgm->throttle_timers;
203
- ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
204
bool must_wait;
205
206
if (qatomic_read(&tgm->io_limits_disabled)) {
207
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
208
}
209
210
/* Check if any of the timers in this group is already armed */
211
- if (tg->any_timer_armed[is_write]) {
212
+ if (tg->any_timer_armed[direction]) {
213
return true;
214
}
215
216
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
217
218
/* If a timer just got armed, set tgm as the current token */
219
if (must_wait) {
220
- tg->tokens[is_write] = tgm;
221
- tg->any_timer_armed[is_write] = true;
222
+ tg->tokens[direction] = tgm;
223
+ tg->any_timer_armed[direction] = true;
224
}
225
226
return must_wait;
227
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
228
* any request was actually pending.
229
*
230
* @tgm: the current ThrottleGroupMember
231
- * @is_write: the type of operation (read/write)
232
+ * @direction: the ThrottleDirection
233
*/
234
static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
235
- bool is_write)
236
+ ThrottleDirection direction)
237
{
238
bool ret;
239
240
qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
241
- ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
242
+ ret = qemu_co_queue_next(&tgm->throttled_reqs[direction]);
243
qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
244
245
return ret;
246
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tg
247
* This assumes that tg->lock is held.
248
*
249
* @tgm: the current ThrottleGroupMember
250
- * @is_write: the type of operation (read/write)
251
+ * @direction: the ThrottleDirection
252
*/
253
-static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
254
+static void schedule_next_request(ThrottleGroupMember *tgm,
255
+ ThrottleDirection direction)
256
{
257
ThrottleState *ts = tgm->throttle_state;
258
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
259
@@ -XXX,XX +XXX,XX @@ static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
260
ThrottleGroupMember *token;
261
262
/* Check if there's any pending request to schedule next */
263
- token = next_throttle_token(tgm, is_write);
264
- if (!tgm_has_pending_reqs(token, is_write)) {
265
+ token = next_throttle_token(tgm, direction);
266
+ if (!tgm_has_pending_reqs(token, direction)) {
267
return;
268
}
269
270
/* Set a timer for the request if it needs to be throttled */
271
- must_wait = throttle_group_schedule_timer(token, is_write);
272
+ must_wait = throttle_group_schedule_timer(token, direction);
273
274
/* If it doesn't have to wait, queue it for immediate execution */
275
if (!must_wait) {
276
/* Give preference to requests from the current tgm */
277
if (qemu_in_coroutine() &&
278
- throttle_group_co_restart_queue(tgm, is_write)) {
279
+ throttle_group_co_restart_queue(tgm, direction)) {
280
token = tgm;
281
} else {
282
ThrottleTimers *tt = &token->throttle_timers;
283
int64_t now = qemu_clock_get_ns(tg->clock_type);
284
- timer_mod(tt->timers[is_write], now);
285
- tg->any_timer_armed[is_write] = true;
286
+ timer_mod(tt->timers[direction], now);
287
+ tg->any_timer_armed[direction] = true;
288
}
289
- tg->tokens[is_write] = token;
290
+ tg->tokens[direction] = token;
291
}
292
}
293
294
@@ -XXX,XX +XXX,XX @@ static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
295
*
296
* @tgm: the current ThrottleGroupMember
297
* @bytes: the number of bytes for this I/O
298
- * @is_write: the type of operation (read/write)
299
+ * @direction: the ThrottleDirection
300
*/
301
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
302
int64_t bytes,
303
- bool is_write)
304
+ ThrottleDirection direction)
305
{
306
bool must_wait;
307
ThrottleGroupMember *token;
308
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
309
- ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
310
311
assert(bytes >= 0);
312
+ assert(direction < THROTTLE_MAX);
313
314
qemu_mutex_lock(&tg->lock);
315
316
/* First we check if this I/O has to be throttled. */
317
- token = next_throttle_token(tgm, is_write);
318
- must_wait = throttle_group_schedule_timer(token, is_write);
319
+ token = next_throttle_token(tgm, direction);
320
+ must_wait = throttle_group_schedule_timer(token, direction);
321
322
/* Wait if there's a timer set or queued requests of this type */
323
- if (must_wait || tgm->pending_reqs[is_write]) {
324
- tgm->pending_reqs[is_write]++;
325
+ if (must_wait || tgm->pending_reqs[direction]) {
326
+ tgm->pending_reqs[direction]++;
327
qemu_mutex_unlock(&tg->lock);
328
qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
329
- qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
330
+ qemu_co_queue_wait(&tgm->throttled_reqs[direction],
331
&tgm->throttled_reqs_lock);
332
qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
333
qemu_mutex_lock(&tg->lock);
334
- tgm->pending_reqs[is_write]--;
335
+ tgm->pending_reqs[direction]--;
336
}
337
338
/* The I/O will be executed, so do the accounting */
339
throttle_account(tgm->throttle_state, direction, bytes);
340
341
/* Schedule the next request */
342
- schedule_next_request(tgm, is_write);
343
+ schedule_next_request(tgm, direction);
344
345
qemu_mutex_unlock(&tg->lock);
346
}
347
348
typedef struct {
349
ThrottleGroupMember *tgm;
350
- bool is_write;
351
+ ThrottleDirection direction;
352
} RestartData;
353
354
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
355
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
356
ThrottleGroupMember *tgm = data->tgm;
357
ThrottleState *ts = tgm->throttle_state;
358
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
359
- bool is_write = data->is_write;
360
+ ThrottleDirection direction = data->direction;
361
bool empty_queue;
362
363
- empty_queue = !throttle_group_co_restart_queue(tgm, is_write);
364
+ empty_queue = !throttle_group_co_restart_queue(tgm, direction);
365
366
/* If the request queue was empty then we have to take care of
367
* scheduling the next one */
368
if (empty_queue) {
369
qemu_mutex_lock(&tg->lock);
370
- schedule_next_request(tgm, is_write);
371
+ schedule_next_request(tgm, direction);
372
qemu_mutex_unlock(&tg->lock);
373
}
374
375
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
376
aio_wait_kick();
377
}
378
379
-static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
380
+static void throttle_group_restart_queue(ThrottleGroupMember *tgm,
381
+ ThrottleDirection direction)
382
{
383
Coroutine *co;
384
RestartData *rd = g_new0(RestartData, 1);
385
386
rd->tgm = tgm;
387
- rd->is_write = is_write;
388
+ rd->direction = direction;
389
390
/* This function is called when a timer is fired or when
391
* throttle_group_restart_tgm() is called. Either way, there can
392
* be no timer pending on this tgm at this point */
393
- assert(!timer_pending(tgm->throttle_timers.timers[is_write]));
394
+ assert(!timer_pending(tgm->throttle_timers.timers[direction]));
395
396
qatomic_inc(&tgm->restart_pending);
397
398
@@ -XXX,XX +XXX,XX @@ static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write
399
400
void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
401
{
402
- int i;
403
+ ThrottleDirection dir;
404
405
if (tgm->throttle_state) {
406
- for (i = 0; i < 2; i++) {
407
- QEMUTimer *t = tgm->throttle_timers.timers[i];
408
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
409
+ QEMUTimer *t = tgm->throttle_timers.timers[dir];
410
if (timer_pending(t)) {
411
/* If there's a pending timer on this tgm, fire it now */
412
timer_del(t);
413
- timer_cb(tgm, i);
414
+ timer_cb(tgm, dir);
415
} else {
416
/* Else run the next request from the queue manually */
417
- throttle_group_restart_queue(tgm, i);
418
+ throttle_group_restart_queue(tgm, dir);
419
}
420
}
421
}
422
@@ -XXX,XX +XXX,XX @@ void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
423
* because it had been throttled.
424
*
425
* @tgm: the ThrottleGroupMember whose request had been throttled
426
- * @is_write: the type of operation (read/write)
427
+ * @direction: the ThrottleDirection
428
*/
429
-static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
430
+static void timer_cb(ThrottleGroupMember *tgm, ThrottleDirection direction)
431
{
432
ThrottleState *ts = tgm->throttle_state;
433
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
434
435
/* The timer has just been fired, so we can update the flag */
436
qemu_mutex_lock(&tg->lock);
437
- tg->any_timer_armed[is_write] = false;
438
+ tg->any_timer_armed[direction] = false;
439
qemu_mutex_unlock(&tg->lock);
440
441
/* Run the request that was waiting for this timer */
442
- throttle_group_restart_queue(tgm, is_write);
443
+ throttle_group_restart_queue(tgm, direction);
444
}
445
446
static void read_timer_cb(void *opaque)
447
{
448
- timer_cb(opaque, false);
449
+ timer_cb(opaque, THROTTLE_READ);
450
}
451
452
static void write_timer_cb(void *opaque)
453
{
454
- timer_cb(opaque, true);
455
+ timer_cb(opaque, THROTTLE_WRITE);
456
}
457
458
/* Register a ThrottleGroupMember from the throttling group, also initializing
459
@@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
460
const char *groupname,
461
AioContext *ctx)
462
{
463
- int i;
464
+ ThrottleDirection dir;
465
ThrottleState *ts = throttle_group_incref(groupname);
466
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
467
468
@@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
469
470
QEMU_LOCK_GUARD(&tg->lock);
471
/* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
472
- for (i = 0; i < 2; i++) {
473
- if (!tg->tokens[i]) {
474
- tg->tokens[i] = tgm;
475
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
476
+ if (!tg->tokens[dir]) {
477
+ tg->tokens[dir] = tgm;
478
}
479
+ qemu_co_queue_init(&tgm->throttled_reqs[dir]);
480
}
481
482
QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);
483
@@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
484
write_timer_cb,
485
tgm);
486
qemu_co_mutex_init(&tgm->throttled_reqs_lock);
487
- qemu_co_queue_init(&tgm->throttled_reqs[0]);
488
- qemu_co_queue_init(&tgm->throttled_reqs[1]);
489
}
490
491
/* Unregister a ThrottleGroupMember from its group, removing it from the list,
492
@@ -XXX,XX +XXX,XX @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
493
ThrottleState *ts = tgm->throttle_state;
494
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
495
ThrottleGroupMember *token;
496
- int i;
497
+ ThrottleDirection dir;
498
499
if (!ts) {
500
/* Discard already unregistered tgm */
501
@@ -XXX,XX +XXX,XX @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
502
AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0);
503
504
WITH_QEMU_LOCK_GUARD(&tg->lock) {
505
- for (i = 0; i < 2; i++) {
506
- assert(tgm->pending_reqs[i] == 0);
507
- assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
508
- assert(!timer_pending(tgm->throttle_timers.timers[i]));
509
- if (tg->tokens[i] == tgm) {
510
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
511
+ assert(tgm->pending_reqs[dir] == 0);
512
+ assert(qemu_co_queue_empty(&tgm->throttled_reqs[dir]));
513
+ assert(!timer_pending(tgm->throttle_timers.timers[dir]));
514
+ if (tg->tokens[dir] == tgm) {
515
token = throttle_group_next_tgm(tgm);
516
/* Take care of the case where this is the last tgm in the group */
517
if (token == tgm) {
518
token = NULL;
519
}
520
- tg->tokens[i] = token;
521
+ tg->tokens[dir] = token;
522
}
523
}
524
525
@@ -XXX,XX +XXX,XX @@ void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
526
{
527
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
528
ThrottleTimers *tt = &tgm->throttle_timers;
529
- int i;
530
+ ThrottleDirection dir;
531
532
/* Requests must have been drained */
533
- assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
534
- assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
535
- assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
536
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
537
+ assert(tgm->pending_reqs[dir] == 0);
538
+ assert(qemu_co_queue_empty(&tgm->throttled_reqs[dir]));
539
+ }
540
541
/* Kick off next ThrottleGroupMember, if necessary */
542
WITH_QEMU_LOCK_GUARD(&tg->lock) {
543
- for (i = 0; i < 2; i++) {
544
- if (timer_pending(tt->timers[i])) {
545
- tg->any_timer_armed[i] = false;
546
- schedule_next_request(tgm, i);
547
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
548
+ if (timer_pending(tt->timers[dir])) {
549
+ tg->any_timer_armed[dir] = false;
550
+ schedule_next_request(tgm, dir);
551
}
552
}
553
}
554
diff --git a/block/throttle.c b/block/throttle.c
555
index XXXXXXX..XXXXXXX 100644
556
--- a/block/throttle.c
557
+++ b/block/throttle.c
558
@@ -XXX,XX +XXX,XX @@ throttle_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
559
{
560
561
ThrottleGroupMember *tgm = bs->opaque;
562
- throttle_group_co_io_limits_intercept(tgm, bytes, false);
563
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_READ);
564
565
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
566
}
567
@@ -XXX,XX +XXX,XX @@ throttle_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
568
QEMUIOVector *qiov, BdrvRequestFlags flags)
569
{
570
ThrottleGroupMember *tgm = bs->opaque;
571
- throttle_group_co_io_limits_intercept(tgm, bytes, true);
572
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_WRITE);
573
574
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
575
}
576
@@ -XXX,XX +XXX,XX @@ throttle_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
577
BdrvRequestFlags flags)
578
{
579
ThrottleGroupMember *tgm = bs->opaque;
580
- throttle_group_co_io_limits_intercept(tgm, bytes, true);
581
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_WRITE);
582
583
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
584
}
585
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn GRAPH_RDLOCK
586
throttle_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
587
{
588
ThrottleGroupMember *tgm = bs->opaque;
589
- throttle_group_co_io_limits_intercept(tgm, bytes, true);
590
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_WRITE);
591
592
return bdrv_co_pdiscard(bs->file, offset, bytes);
593
}
594
--
595
2.41.0
diff view generated by jsdifflib
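
The hunks above index the queues, tokens and timers by ThrottleDirection
instead of a bool is_write. The enum itself is introduced by an earlier
patch in this series (presumably in include/qemu/throttle.h, which is not
shown here); roughly, the code above assumes a definition along these
lines:

    /* Sketch only -- the authoritative definition is added by an earlier
     * patch in this series (assumed to live in include/qemu/throttle.h). */
    typedef enum {
        THROTTLE_READ = 0,
        THROTTLE_WRITE,
        THROTTLE_MAX,
    } ThrottleDirection;

With THROTTLE_READ == 0 and THROTTLE_WRITE == 1, indexing pending_reqs[],
tokens[], throttled_reqs[] and the timer array by ThrottleDirection keeps
the same layout as the old is_write indexing, while THROTTLE_MAX replaces
the hard-coded array size of 2.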
New patch
1
bs->bl.zoned is what indicates whether the zone information is present
2
and valid; it is the only thing that raw_refresh_zoned_limits() sets if
3
CONFIG_BLKZONED is not defined, and it is also the only thing that it
4
sets if CONFIG_BLKZONED is defined, but there are no zones.
1
5
6
Make sure that it is always set to BLK_Z_NONE if there is an error
7
anywhere in raw_refresh_zoned_limits() so that we do not accidentally
8
announce zones while our information is incomplete or invalid.
9
10
This also fixes a memory leak in the last error path in
11
raw_refresh_zoned_limits().
12
13
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
14
Message-Id: <20230824155345.109765-2-hreitz@redhat.com>
15
Reviewed-by: Sam Li <faithilikerun@gmail.com>
16
---
17
block/file-posix.c | 21 ++++++++++++---------
18
1 file changed, 12 insertions(+), 9 deletions(-)
19
20
diff --git a/block/file-posix.c b/block/file-posix.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/block/file-posix.c
23
+++ b/block/file-posix.c
24
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
25
BlockZoneModel zoned;
26
int ret;
27
28
- bs->bl.zoned = BLK_Z_NONE;
29
-
30
ret = get_sysfs_zoned_model(st, &zoned);
31
if (ret < 0 || zoned == BLK_Z_NONE) {
32
- return;
33
+ goto no_zoned;
34
}
35
bs->bl.zoned = zoned;
36
37
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
38
if (ret < 0) {
39
error_setg_errno(errp, -ret, "Unable to read chunk_sectors "
40
"sysfs attribute");
41
- return;
42
+ goto no_zoned;
43
} else if (!ret) {
44
error_setg(errp, "Read 0 from chunk_sectors sysfs attribute");
45
- return;
46
+ goto no_zoned;
47
}
48
bs->bl.zone_size = ret << BDRV_SECTOR_BITS;
49
50
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
51
if (ret < 0) {
52
error_setg_errno(errp, -ret, "Unable to read nr_zones "
53
"sysfs attribute");
54
- return;
55
+ goto no_zoned;
56
} else if (!ret) {
57
error_setg(errp, "Read 0 from nr_zones sysfs attribute");
58
- return;
59
+ goto no_zoned;
60
}
61
bs->bl.nr_zones = ret;
62
63
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
64
ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0);
65
if (ret < 0) {
66
error_setg_errno(errp, -ret, "report wps failed");
67
- bs->wps = NULL;
68
- return;
69
+ goto no_zoned;
70
}
71
qemu_co_mutex_init(&bs->wps->colock);
72
+ return;
73
+
74
+no_zoned:
75
+ bs->bl.zoned = BLK_Z_NONE;
76
+ g_free(bs->wps);
77
+ bs->wps = NULL;
78
}
79
#else /* !defined(CONFIG_BLKZONED) */
80
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
81
--
82
2.41.0
diff view generated by jsdifflib
New patch
1
Instead of checking bs->wps or bs->bl.zone_size for whether zone
2
information is present, check bs->bl.zoned. That is the flag that
3
raw_refresh_zoned_limits() reliably sets to indicate zone support. If
4
it is set to something other than BLK_Z_NONE, other values and objects
5
like bs->wps and bs->bl.zone_size must be non-null/zero and valid; if it
6
is not, we cannot rely on their validity.
1
7
8
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
9
Message-Id: <20230824155345.109765-3-hreitz@redhat.com>
10
Reviewed-by: Sam Li <faithilikerun@gmail.com>
11
---
12
block/file-posix.c | 12 +++++++-----
13
1 file changed, 7 insertions(+), 5 deletions(-)
14
15
diff --git a/block/file-posix.c b/block/file-posix.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/file-posix.c
18
+++ b/block/file-posix.c
19
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
20
if (fd_open(bs) < 0)
21
return -EIO;
22
#if defined(CONFIG_BLKZONED)
23
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && bs->wps) {
24
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
25
+ bs->bl.zoned != BLK_Z_NONE) {
26
qemu_co_mutex_lock(&bs->wps->colock);
27
- if (type & QEMU_AIO_ZONE_APPEND && bs->bl.zone_size) {
28
+ if (type & QEMU_AIO_ZONE_APPEND) {
29
int index = offset / bs->bl.zone_size;
30
offset = bs->wps->wp[index];
31
}
32
@@ -XXX,XX +XXX,XX @@ out:
33
{
34
BlockZoneWps *wps = bs->wps;
35
if (ret == 0) {
36
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))
37
- && wps && bs->bl.zone_size) {
38
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
39
+ bs->bl.zoned != BLK_Z_NONE) {
40
uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
41
if (!BDRV_ZT_IS_CONV(*wp)) {
42
if (type & QEMU_AIO_ZONE_APPEND) {
43
@@ -XXX,XX +XXX,XX @@ out:
44
}
45
}
46
47
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && wps) {
48
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
49
+        bs->bl.zoned != BLK_Z_NONE) {
50
qemu_co_mutex_unlock(&wps->colock);
51
}
52
}
53
--
54
2.41.0
diff view generated by jsdifflib
New patch
1
We must check that zone information is present before running
2
update_zones_wp().
1
3
4
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=2234374
5
Fixes: Coverity CID 1512459
6
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
7
Message-Id: <20230824155345.109765-4-hreitz@redhat.com>
8
Reviewed-by: Sam Li <faithilikerun@gmail.com>
9
---
10
block/file-posix.c | 3 ++-
11
1 file changed, 2 insertions(+), 1 deletion(-)
12
13
diff --git a/block/file-posix.c b/block/file-posix.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/block/file-posix.c
16
+++ b/block/file-posix.c
17
@@ -XXX,XX +XXX,XX @@ out:
18
}
19
}
20
} else {
21
- if (type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) {
22
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
23
+ bs->bl.zoned != BLK_Z_NONE) {
24
update_zones_wp(bs, s->fd, 0, 1);
25
}
26
}
27
--
28
2.41.0
diff view generated by jsdifflib
New patch
1
We duplicate the same condition three times here, pull it out to the top
2
level.
1
3
4
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
5
Message-Id: <20230824155345.109765-5-hreitz@redhat.com>
6
Reviewed-by: Sam Li <faithilikerun@gmail.com>
7
---
8
block/file-posix.c | 18 +++++-------------
9
1 file changed, 5 insertions(+), 13 deletions(-)
10
11
diff --git a/block/file-posix.c b/block/file-posix.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/block/file-posix.c
14
+++ b/block/file-posix.c
15
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
16
17
out:
18
#if defined(CONFIG_BLKZONED)
19
-{
20
- BlockZoneWps *wps = bs->wps;
21
- if (ret == 0) {
22
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
23
- bs->bl.zoned != BLK_Z_NONE) {
24
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
25
+ bs->bl.zoned != BLK_Z_NONE) {
26
+ BlockZoneWps *wps = bs->wps;
27
+ if (ret == 0) {
28
uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
29
if (!BDRV_ZT_IS_CONV(*wp)) {
30
if (type & QEMU_AIO_ZONE_APPEND) {
31
@@ -XXX,XX +XXX,XX @@ out:
32
*wp = offset + bytes;
33
}
34
}
35
- }
36
- } else {
37
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
38
- bs->bl.zoned != BLK_Z_NONE) {
39
+ } else {
40
update_zones_wp(bs, s->fd, 0, 1);
41
}
42
- }
43
44
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
45
- bs->blk.zoned != BLK_Z_NONE) {
46
qemu_co_mutex_unlock(&wps->colock);
47
}
48
-}
49
#endif
50
return ret;
51
}
52
--
53
2.41.0
diff view generated by jsdifflib
1
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>
1
This is a regression test for
2
https://bugzilla.redhat.com/show_bug.cgi?id=2234374.
2
3
3
Introduce the 'event-loop-base' abstract class, it'll hold the
4
All this test needs to do is trigger an I/O error inside of file-posix
4
properties common to all event loops and provide the necessary hooks for
5
(specifically raw_co_prw()). One reliable way to do this without
5
their creation and maintenance. Then have iothread inherit from it.
6
requiring special privileges is to use a FUSE export, which allows us to
7
inject any error that we want, e.g. via blkdebug.
6
8
7
EventLoopBaseClass is defined as user creatable and provides a hook for
9
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
8
its children to attach themselves to the user creatable class 'complete'
10
Message-Id: <20230824155345.109765-6-hreitz@redhat.com>
9
function. It also provides an update_params() callback to propagate
11
[hreitz: Fixed test to be skipped when there is no FUSE support, to
10
property changes onto its children.
12
suppress fusermount's allow_other warning, and to be skipped
13
with $IMGOPTSSYNTAX enabled]
14
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
15
---
16
tests/qemu-iotests/tests/file-io-error | 119 +++++++++++++++++++++
17
tests/qemu-iotests/tests/file-io-error.out | 33 ++++++
18
2 files changed, 152 insertions(+)
19
create mode 100755 tests/qemu-iotests/tests/file-io-error
20
create mode 100644 tests/qemu-iotests/tests/file-io-error.out
11
21
12
The new 'event-loop-base' class will live in the root directory. It is
22
diff --git a/tests/qemu-iotests/tests/file-io-error b/tests/qemu-iotests/tests/file-io-error
13
built on its own using the 'link_whole' option (there are no direct
23
new file mode 100755
14
function dependencies between the class and its children, it all happens
24
index XXXXXXX..XXXXXXX
15
trough 'constructor' magic). And also imposes new compilation
25
--- /dev/null
16
dependencies:
26
+++ b/tests/qemu-iotests/tests/file-io-error
17
18
qom <- event-loop-base <- blockdev (iothread.c)
19
20
And in subsequent patches:
21
22
qom <- event-loop-base <- qemuutil (util/main-loop.c)
23
24
All this forced some amount of reordering in meson.build:
25
26
- Moved qom build definition before qemuutil. Doing it the other way
27
around (i.e. moving qemuutil after qom) isn't possible as a lot of
28
core libraries that live in between the two depend on it.
29
30
- Process the 'hw' subdir earlier, as it introduces files into the
31
'qom' source set.
32
33
No functional changes intended.
34
35
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
36
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
37
Acked-by: Markus Armbruster <armbru@redhat.com>
38
Message-id: 20220425075723.20019-2-nsaenzju@redhat.com
39
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
40
---
41
qapi/qom.json | 22 +++++--
42
meson.build | 23 ++++---
43
include/sysemu/event-loop-base.h | 36 +++++++++++
44
include/sysemu/iothread.h | 6 +-
45
event-loop-base.c | 104 +++++++++++++++++++++++++++++++
46
iothread.c | 65 ++++++-------------
47
6 files changed, 192 insertions(+), 64 deletions(-)
48
create mode 100644 include/sysemu/event-loop-base.h
49
create mode 100644 event-loop-base.c
50
51
diff --git a/qapi/qom.json b/qapi/qom.json
52
index XXXXXXX..XXXXXXX 100644
53
--- a/qapi/qom.json
54
+++ b/qapi/qom.json
55
@@ -XXX,XX +XXX,XX @@
27
@@ -XXX,XX +XXX,XX @@
56
'*repeat': 'bool',
28
+#!/usr/bin/env bash
57
'*grab-toggle': 'GrabToggleKeys' } }
29
+# group: rw
58
59
+##
60
+# @EventLoopBaseProperties:
61
+#
30
+#
62
+# Common properties for event loops
31
+# Produce an I/O error in file-posix, and hope that it is not catastrophic.
32
+# Regression test for: https://bugzilla.redhat.com/show_bug.cgi?id=2234374
63
+#
33
+#
64
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
34
+# Copyright (C) 2023 Red Hat, Inc.
65
+# 0 means that the engine will use its default.
66
+# (default: 0)
67
+#
35
+#
68
+# Since: 7.1
36
+# This program is free software; you can redistribute it and/or modify
69
+##
37
+# it under the terms of the GNU General Public License as published by
70
+{ 'struct': 'EventLoopBaseProperties',
38
+# the Free Software Foundation; either version 2 of the License, or
71
+ 'data': { '*aio-max-batch': 'int' } }
39
+# (at your option) any later version.
40
+#
41
+# This program is distributed in the hope that it will be useful,
42
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
43
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44
+# GNU General Public License for more details.
45
+#
46
+# You should have received a copy of the GNU General Public License
47
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
48
+#
72
+
49
+
73
##
50
+seq=$(basename "$0")
74
# @IothreadProperties:
51
+echo "QA output created by $seq"
75
#
76
@@ -XXX,XX +XXX,XX @@
77
# algorithm detects it is spending too long polling without
78
# encountering events. 0 selects a default behaviour (default: 0)
79
#
80
-# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
81
-# 0 means that the engine will use its default
82
-# (default:0, since 6.1)
83
+# The @aio-max-batch option is available since 6.1.
84
#
85
# Since: 2.0
86
##
87
{ 'struct': 'IothreadProperties',
88
+ 'base': 'EventLoopBaseProperties',
89
'data': { '*poll-max-ns': 'int',
90
'*poll-grow': 'int',
91
- '*poll-shrink': 'int',
92
- '*aio-max-batch': 'int' } }
93
+ '*poll-shrink': 'int' } }
94
95
##
96
# @MemoryBackendProperties:
97
diff --git a/meson.build b/meson.build
98
index XXXXXXX..XXXXXXX 100644
99
--- a/meson.build
100
+++ b/meson.build
101
@@ -XXX,XX +XXX,XX @@ subdir('qom')
102
subdir('authz')
103
subdir('crypto')
104
subdir('ui')
105
+subdir('hw')
106
107
108
if enable_modules
109
@@ -XXX,XX +XXX,XX @@ if enable_modules
110
modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO')
111
endif
112
113
+qom_ss = qom_ss.apply(config_host, strict: false)
114
+libqom = static_library('qom', qom_ss.sources() + genh,
115
+ dependencies: [qom_ss.dependencies()],
116
+ name_suffix: 'fa')
117
+qom = declare_dependency(link_whole: libqom)
118
+
52
+
119
+event_loop_base = files('event-loop-base.c')
53
+status=1    # failure is the default!
120
+event_loop_base = static_library('event-loop-base', sources: event_loop_base + genh,
121
+ build_by_default: true)
122
+event_loop_base = declare_dependency(link_whole: event_loop_base,
123
+ dependencies: [qom])
124
+
54
+
125
stub_ss = stub_ss.apply(config_all, strict: false)
55
+_cleanup()
126
56
+{
127
util_ss.add_all(trace_ss)
57
+ _cleanup_qemu
128
@@ -XXX,XX +XXX,XX @@ subdir('monitor')
58
+ rm -f "$TEST_DIR/fuse-export"
129
subdir('net')
59
+}
130
subdir('replay')
60
+trap "_cleanup; exit \$status" 0 1 2 3 15
131
subdir('semihosting')
61
+
132
-subdir('hw')
62
+# get standard environment, filters and checks
133
subdir('tcg')
63
+. ../common.rc
134
subdir('fpu')
64
+. ../common.filter
135
subdir('accel')
65
+. ../common.qemu
136
@@ -XXX,XX +XXX,XX @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms',
66
+
137
capture: true,
67
+# Format-agnostic (we do not use any), but we do test the file protocol
138
command: [undefsym, nm, '@INPUT@'])
68
+_supported_proto file
139
69
+_require_drivers blkdebug null-co
140
-qom_ss = qom_ss.apply(config_host, strict: false)
70
+
141
-libqom = static_library('qom', qom_ss.sources() + genh,
71
+if [ "$IMGOPTSSYNTAX" = "true" ]; then
142
- dependencies: [qom_ss.dependencies()],
72
+ # We need `$QEMU_IO -f file` to work; IMGOPTSSYNTAX uses --image-opts,
143
- name_suffix: 'fa')
73
+ # breaking -f.
144
-
74
+ _unsupported_fmt $IMGFMT
145
-qom = declare_dependency(link_whole: libqom)
75
+fi
146
-
76
+
147
authz_ss = authz_ss.apply(config_host, strict: false)
77
+# This is a regression test of a bug in which file-posix would access zone
148
libauthz = static_library('authz', authz_ss.sources() + genh,
78
+# information in case of an I/O error even when there is no zone information,
149
dependencies: [authz_ss.dependencies()],
79
+# resulting in a division by zero.
150
@@ -XXX,XX +XXX,XX @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh,
80
+# To reproduce the problem, we need to trigger an I/O error inside of
151
build_by_default: false)
81
+# file-posix, which can be done (rootless) by providing a FUSE export that
152
82
+# presents only errors when accessed.
153
blockdev = declare_dependency(link_whole: [libblockdev],
83
+
154
- dependencies: [block])
84
+_launch_qemu
155
+ dependencies: [block, event_loop_base])
85
+_send_qemu_cmd $QEMU_HANDLE \
156
86
+ "{'execute': 'qmp_capabilities'}" \
157
qmp_ss = qmp_ss.apply(config_host, strict: false)
87
+ 'return'
158
libqmp = static_library('qmp', qmp_ss.sources() + genh,
88
+
159
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
89
+_send_qemu_cmd $QEMU_HANDLE \
90
+ "{'execute': 'blockdev-add',
91
+ 'arguments': {
92
+ 'driver': 'blkdebug',
93
+ 'node-name': 'node0',
94
+ 'inject-error': [{'event': 'none'}],
95
+ 'image': {
96
+ 'driver': 'null-co'
97
+ }
98
+ }}" \
99
+ 'return'
100
+
101
+# FUSE mountpoint must exist and be a regular file
102
+touch "$TEST_DIR/fuse-export"
103
+
104
+# The grep -v to filter fusermount's (benign) error when /etc/fuse.conf does
105
+# not contain user_allow_other and the subsequent check for missing FUSE support
106
+# have both been taken from iotest 308.
107
+output=$(_send_qemu_cmd $QEMU_HANDLE \
108
+ "{'execute': 'block-export-add',
109
+ 'arguments': {
110
+ 'id': 'exp0',
111
+ 'type': 'fuse',
112
+ 'node-name': 'node0',
113
+ 'mountpoint': '$TEST_DIR/fuse-export',
114
+ 'writable': true
115
+ }}" \
116
+ 'return' \
117
+ | grep -v 'option allow_other only allowed if')
118
+
119
+if echo "$output" | grep -q "Parameter 'type' does not accept value 'fuse'"; then
120
+ _notrun 'No FUSE support'
121
+fi
122
+echo "$output"
123
+
124
+echo
125
+# This should fail, but gracefully, i.e. just print an I/O error, not crash.
126
+$QEMU_IO -f file -c 'write 0 64M' "$TEST_DIR/fuse-export" | _filter_qemu_io
127
+echo
128
+
129
+_send_qemu_cmd $QEMU_HANDLE \
130
+ "{'execute': 'block-export-del',
131
+ 'arguments': {'id': 'exp0'}}" \
132
+ 'return'
133
+
134
+_send_qemu_cmd $QEMU_HANDLE \
135
+ '' \
136
+ 'BLOCK_EXPORT_DELETED'
137
+
138
+_send_qemu_cmd $QEMU_HANDLE \
139
+ "{'execute': 'blockdev-del',
140
+ 'arguments': {'node-name': 'node0'}}" \
141
+ 'return'
142
+
143
+# success, all done
144
+echo "*** done"
145
+rm -f $seq.full
146
+status=0
147
diff --git a/tests/qemu-iotests/tests/file-io-error.out b/tests/qemu-iotests/tests/file-io-error.out
160
new file mode 100644
148
new file mode 100644
161
index XXXXXXX..XXXXXXX
149
index XXXXXXX..XXXXXXX
162
--- /dev/null
150
--- /dev/null
163
+++ b/include/sysemu/event-loop-base.h
151
+++ b/tests/qemu-iotests/tests/file-io-error.out
164
@@ -XXX,XX +XXX,XX @@
152
@@ -XXX,XX +XXX,XX @@
165
+/*
153
+QA output created by file-io-error
166
+ * QEMU event-loop backend
154
+{'execute': 'qmp_capabilities'}
167
+ *
155
+{"return": {}}
168
+ * Copyright (C) 2022 Red Hat Inc
156
+{'execute': 'blockdev-add',
169
+ *
157
+ 'arguments': {
170
+ * Authors:
158
+ 'driver': 'blkdebug',
171
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
159
+ 'node-name': 'node0',
172
+ *
160
+ 'inject-error': [{'event': 'none'}],
173
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
161
+ 'image': {
174
+ * See the COPYING file in the top-level directory.
162
+ 'driver': 'null-co'
175
+ */
163
+ }
176
+#ifndef QEMU_EVENT_LOOP_BASE_H
164
+ }}
177
+#define QEMU_EVENT_LOOP_BASE_H
165
+{"return": {}}
166
+{'execute': 'block-export-add',
167
+ 'arguments': {
168
+ 'id': 'exp0',
169
+ 'type': 'fuse',
170
+ 'node-name': 'node0',
171
+ 'mountpoint': 'TEST_DIR/fuse-export',
172
+ 'writable': true
173
+ }}
174
+{"return": {}}
178
+
175
+
179
+#include "qom/object.h"
176
+write failed: Input/output error
180
+#include "block/aio.h"
181
+#include "qemu/typedefs.h"
182
+
177
+
183
+#define TYPE_EVENT_LOOP_BASE "event-loop-base"
178
+{'execute': 'block-export-del',
184
+OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass,
179
+ 'arguments': {'id': 'exp0'}}
185
+ EVENT_LOOP_BASE)
180
+{"return": {}}
186
+
181
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "exp0"}}
187
+struct EventLoopBaseClass {
182
+{'execute': 'blockdev-del',
188
+ ObjectClass parent_class;
183
+ 'arguments': {'node-name': 'node0'}}
189
+
184
+{"return": {}}
190
+ void (*init)(EventLoopBase *base, Error **errp);
185
+*** done
191
+ void (*update_params)(EventLoopBase *base, Error **errp);
192
+};
193
+
194
+struct EventLoopBase {
195
+ Object parent;
196
+
197
+ /* AioContext AIO engine parameters */
198
+ int64_t aio_max_batch;
199
+};
200
+#endif
201
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
202
index XXXXXXX..XXXXXXX 100644
203
--- a/include/sysemu/iothread.h
204
+++ b/include/sysemu/iothread.h
205
@@ -XXX,XX +XXX,XX @@
206
#include "block/aio.h"
207
#include "qemu/thread.h"
208
#include "qom/object.h"
209
+#include "sysemu/event-loop-base.h"
210
211
#define TYPE_IOTHREAD "iothread"
212
213
struct IOThread {
214
- Object parent_obj;
215
+ EventLoopBase parent_obj;
216
217
QemuThread thread;
218
AioContext *ctx;
219
@@ -XXX,XX +XXX,XX @@ struct IOThread {
220
int64_t poll_max_ns;
221
int64_t poll_grow;
222
int64_t poll_shrink;
223
-
224
- /* AioContext AIO engine parameters */
225
- int64_t aio_max_batch;
226
};
227
typedef struct IOThread IOThread;
228
229
diff --git a/event-loop-base.c b/event-loop-base.c
230
new file mode 100644
231
index XXXXXXX..XXXXXXX
232
--- /dev/null
233
+++ b/event-loop-base.c
234
@@ -XXX,XX +XXX,XX @@
235
+/*
236
+ * QEMU event-loop base
237
+ *
238
+ * Copyright (C) 2022 Red Hat Inc
239
+ *
240
+ * Authors:
241
+ * Stefan Hajnoczi <stefanha@redhat.com>
242
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
243
+ *
244
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
245
+ * See the COPYING file in the top-level directory.
246
+ */
247
+
248
+#include "qemu/osdep.h"
249
+#include "qom/object_interfaces.h"
250
+#include "qapi/error.h"
251
+#include "sysemu/event-loop-base.h"
252
+
253
+typedef struct {
254
+ const char *name;
255
+ ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
256
+} EventLoopBaseParamInfo;
257
+
258
+static EventLoopBaseParamInfo aio_max_batch_info = {
259
+ "aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
260
+};
261
+
262
+static void event_loop_base_get_param(Object *obj, Visitor *v,
263
+ const char *name, void *opaque, Error **errp)
264
+{
265
+ EventLoopBase *event_loop_base = EVENT_LOOP_BASE(obj);
266
+ EventLoopBaseParamInfo *info = opaque;
267
+ int64_t *field = (void *)event_loop_base + info->offset;
268
+
269
+ visit_type_int64(v, name, field, errp);
270
+}
271
+
272
+static void event_loop_base_set_param(Object *obj, Visitor *v,
273
+ const char *name, void *opaque, Error **errp)
274
+{
275
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(obj);
276
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
277
+ EventLoopBaseParamInfo *info = opaque;
278
+ int64_t *field = (void *)base + info->offset;
279
+ int64_t value;
280
+
281
+ if (!visit_type_int64(v, name, &value, errp)) {
282
+ return;
283
+ }
284
+
285
+ if (value < 0) {
286
+ error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
287
+ info->name, INT64_MAX);
288
+ return;
289
+ }
290
+
291
+ *field = value;
292
+
293
+ if (bc->update_params) {
294
+ bc->update_params(base, errp);
295
+ }
296
+
297
+ return;
298
+}
299
+
300
+static void event_loop_base_complete(UserCreatable *uc, Error **errp)
301
+{
302
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
303
+ EventLoopBase *base = EVENT_LOOP_BASE(uc);
304
+
305
+ if (bc->init) {
306
+ bc->init(base, errp);
307
+ }
308
+}
309
+
310
+static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
311
+{
312
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
313
+ ucc->complete = event_loop_base_complete;
314
+
315
+ object_class_property_add(klass, "aio-max-batch", "int",
316
+ event_loop_base_get_param,
317
+ event_loop_base_set_param,
318
+ NULL, &aio_max_batch_info);
319
+}
320
+
321
+static const TypeInfo event_loop_base_info = {
322
+ .name = TYPE_EVENT_LOOP_BASE,
323
+ .parent = TYPE_OBJECT,
324
+ .instance_size = sizeof(EventLoopBase),
325
+ .class_size = sizeof(EventLoopBaseClass),
326
+ .class_init = event_loop_base_class_init,
327
+ .abstract = true,
328
+ .interfaces = (InterfaceInfo[]) {
329
+ { TYPE_USER_CREATABLE },
330
+ { }
331
+ }
332
+};
333
+
334
+static void register_types(void)
335
+{
336
+ type_register_static(&event_loop_base_info);
337
+}
338
+type_init(register_types);
339
diff --git a/iothread.c b/iothread.c
340
index XXXXXXX..XXXXXXX 100644
341
--- a/iothread.c
342
+++ b/iothread.c
343
@@ -XXX,XX +XXX,XX @@
344
#include "qemu/module.h"
345
#include "block/aio.h"
346
#include "block/block.h"
347
+#include "sysemu/event-loop-base.h"
348
#include "sysemu/iothread.h"
349
#include "qapi/error.h"
350
#include "qapi/qapi-commands-misc.h"
351
@@ -XXX,XX +XXX,XX @@ static void iothread_init_gcontext(IOThread *iothread)
352
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
353
}
354
355
-static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
356
+static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
357
{
358
+ IOThread *iothread = IOTHREAD(base);
359
ERRP_GUARD();
360
361
+ if (!iothread->ctx) {
362
+ return;
363
+ }
364
+
365
aio_context_set_poll_params(iothread->ctx,
366
iothread->poll_max_ns,
367
iothread->poll_grow,
368
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
369
}
370
371
aio_context_set_aio_params(iothread->ctx,
372
- iothread->aio_max_batch,
373
+ iothread->parent_obj.aio_max_batch,
374
errp);
375
}
376
377
-static void iothread_complete(UserCreatable *obj, Error **errp)
378
+
379
+static void iothread_init(EventLoopBase *base, Error **errp)
380
{
381
Error *local_error = NULL;
382
- IOThread *iothread = IOTHREAD(obj);
383
+ IOThread *iothread = IOTHREAD(base);
384
char *thread_name;
385
386
iothread->stopping = false;
387
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
388
*/
389
iothread_init_gcontext(iothread);
390
391
- iothread_set_aio_context_params(iothread, &local_error);
392
+ iothread_set_aio_context_params(base, &local_error);
393
if (local_error) {
394
error_propagate(errp, local_error);
395
aio_context_unref(iothread->ctx);
396
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
397
* to inherit.
398
*/
399
thread_name = g_strdup_printf("IO %s",
400
- object_get_canonical_path_component(OBJECT(obj)));
401
+ object_get_canonical_path_component(OBJECT(base)));
402
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
403
iothread, QEMU_THREAD_JOINABLE);
404
g_free(thread_name);
405
@@ -XXX,XX +XXX,XX @@ static IOThreadParamInfo poll_grow_info = {
406
static IOThreadParamInfo poll_shrink_info = {
407
"poll-shrink", offsetof(IOThread, poll_shrink),
408
};
409
-static IOThreadParamInfo aio_max_batch_info = {
410
- "aio-max-batch", offsetof(IOThread, aio_max_batch),
411
-};
412
413
static void iothread_get_param(Object *obj, Visitor *v,
414
const char *name, IOThreadParamInfo *info, Error **errp)
415
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
416
}
417
}
418
419
-static void iothread_get_aio_param(Object *obj, Visitor *v,
420
- const char *name, void *opaque, Error **errp)
421
-{
422
- IOThreadParamInfo *info = opaque;
423
-
424
- iothread_get_param(obj, v, name, info, errp);
425
-}
426
-
427
-static void iothread_set_aio_param(Object *obj, Visitor *v,
428
- const char *name, void *opaque, Error **errp)
429
-{
430
- IOThread *iothread = IOTHREAD(obj);
431
- IOThreadParamInfo *info = opaque;
432
-
433
- if (!iothread_set_param(obj, v, name, info, errp)) {
434
- return;
435
- }
436
-
437
- if (iothread->ctx) {
438
- aio_context_set_aio_params(iothread->ctx,
439
- iothread->aio_max_batch,
440
- errp);
441
- }
442
-}
443
-
444
static void iothread_class_init(ObjectClass *klass, void *class_data)
445
{
446
- UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
447
- ucc->complete = iothread_complete;
448
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);
449
+
450
+ bc->init = iothread_init;
451
+ bc->update_params = iothread_set_aio_context_params;
452
453
object_class_property_add(klass, "poll-max-ns", "int",
454
iothread_get_poll_param,
455
@@ -XXX,XX +XXX,XX @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
456
iothread_get_poll_param,
457
iothread_set_poll_param,
458
NULL, &poll_shrink_info);
459
- object_class_property_add(klass, "aio-max-batch", "int",
460
- iothread_get_aio_param,
461
- iothread_set_aio_param,
462
- NULL, &aio_max_batch_info);
463
}
464
465
static const TypeInfo iothread_info = {
466
.name = TYPE_IOTHREAD,
467
- .parent = TYPE_OBJECT,
468
+ .parent = TYPE_EVENT_LOOP_BASE,
469
.class_init = iothread_class_init,
470
.instance_size = sizeof(IOThread),
471
.instance_init = iothread_instance_init,
472
.instance_finalize = iothread_instance_finalize,
473
- .interfaces = (InterfaceInfo[]) {
474
- {TYPE_USER_CREATABLE},
475
- {}
476
- },
477
};
478
479
static void iothread_register_types(void)
480
@@ -XXX,XX +XXX,XX @@ static int query_one_iothread(Object *object, void *opaque)
481
info->poll_max_ns = iothread->poll_max_ns;
482
info->poll_grow = iothread->poll_grow;
483
info->poll_shrink = iothread->poll_shrink;
484
- info->aio_max_batch = iothread->aio_max_batch;
485
+ info->aio_max_batch = iothread->parent_obj.aio_max_batch;
486
487
QAPI_LIST_APPEND(*tail, info);
488
return 0;
489
--
186
--
490
2.35.1
187
2.41.0
diff view generated by jsdifflib