1
The following changes since commit 64175afc695c0672876fbbfc31b299c86d562cb4:
1
The following changes since commit f5fe7c17ac4e309e47e78f0f9761aebc8d2f2c81:
2
2
3
arm_gicv3: Fix ICC_BPR1 reset value when EL3 not implemented (2017-06-07 17:21:44 +0100)
3
Merge tag 'pull-tcg-20230823-2' of https://gitlab.com/rth7680/qemu into staging (2023-08-28 16:07:04 -0400)
4
4
5
are available in the git repository at:
5
are available in the Git repository at:
6
6
7
git://github.com/codyprime/qemu-kvm-jtc.git tags/block-pull-request
7
https://gitlab.com/hreitz/qemu.git tags/pull-block-2023-09-01
8
8
9
for you to fetch changes up to 56faeb9bb6872b3f926b3b3e0452a70beea10af2:
9
for you to fetch changes up to 380448464dd89291cf7fd7434be6c225482a334d:
10
10
11
block/gluster.c: Handle qdict_array_entries() failure (2017-06-09 08:41:29 -0400)
11
tests/file-io-error: New test (2023-08-29 13:01:24 +0200)
12
12
13
----------------------------------------------------------------
13
----------------------------------------------------------------
14
Gluster patch
14
Block patches
15
16
- Fix for file-posix's zoning code crashing on I/O errors
17
- Throttling refactoring
18
15
----------------------------------------------------------------
19
----------------------------------------------------------------
20
Hanna Czenczek (5):
21
file-posix: Clear bs->bl.zoned on error
22
file-posix: Check bs->bl.zoned for zone info
23
file-posix: Fix zone update in I/O error path
24
file-posix: Simplify raw_co_prw's 'out' zone code
25
tests/file-io-error: New test
16
26
17
Peter Maydell (1):
27
Zhenwei Pi (9):
18
block/gluster.c: Handle qdict_array_entries() failure
28
throttle: introduce enum ThrottleDirection
29
test-throttle: use enum ThrottleDirection
30
throttle: support read-only and write-only
31
test-throttle: test read only and write only
32
cryptodev: use NULL throttle timer cb for read direction
33
throttle: use enum ThrottleDirection instead of bool is_write
34
throttle: use THROTTLE_MAX/ARRAY_SIZE for hard code
35
fsdev: Use ThrottleDirection instead of bool is_write
36
block/throttle-groups: Use ThrottleDirection instead of bool is_write
19
37
20
block/gluster.c | 3 +--
38
fsdev/qemu-fsdev-throttle.h | 4 +-
21
1 file changed, 1 insertion(+), 2 deletions(-)
39
include/block/throttle-groups.h | 6 +-
40
include/qemu/throttle.h | 16 +-
41
backends/cryptodev.c | 12 +-
42
block/block-backend.c | 4 +-
43
block/file-posix.c | 42 +++---
44
block/throttle-groups.c | 163 +++++++++++----------
45
block/throttle.c | 8 +-
46
fsdev/qemu-fsdev-throttle.c | 18 ++-
47
hw/9pfs/cofile.c | 4 +-
48
tests/unit/test-throttle.c | 76 +++++++++-
49
util/throttle.c | 84 +++++++----
50
tests/qemu-iotests/tests/file-io-error | 119 +++++++++++++++
51
tests/qemu-iotests/tests/file-io-error.out | 33 +++++
52
14 files changed, 418 insertions(+), 171 deletions(-)
53
create mode 100755 tests/qemu-iotests/tests/file-io-error
54
create mode 100644 tests/qemu-iotests/tests/file-io-error.out
22
55
23
--
56
--
24
2.9.3
57
2.41.0
25
26
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
Use enum ThrottleDirection instead of number index.
4
5
Reviewed-by: Alberto Garcia <berto@igalia.com>
6
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
7
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
8
Message-Id: <20230728022006.1098509-2-pizhenwei@bytedance.com>
9
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
10
---
11
include/qemu/throttle.h | 11 ++++++++---
12
util/throttle.c | 16 +++++++++-------
13
2 files changed, 17 insertions(+), 10 deletions(-)
14
15
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/qemu/throttle.h
18
+++ b/include/qemu/throttle.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct ThrottleState {
20
int64_t previous_leak; /* timestamp of the last leak done */
21
} ThrottleState;
22
23
+typedef enum {
24
+ THROTTLE_READ = 0,
25
+ THROTTLE_WRITE,
26
+ THROTTLE_MAX
27
+} ThrottleDirection;
28
+
29
typedef struct ThrottleTimers {
30
- QEMUTimer *timers[2]; /* timers used to do the throttling */
31
+ QEMUTimer *timers[THROTTLE_MAX]; /* timers used to do the throttling */
32
QEMUClockType clock_type; /* the clock used */
33
34
/* Callbacks */
35
- QEMUTimerCB *read_timer_cb;
36
- QEMUTimerCB *write_timer_cb;
37
+ QEMUTimerCB *timer_cb[THROTTLE_MAX];
38
void *timer_opaque;
39
} ThrottleTimers;
40
41
diff --git a/util/throttle.c b/util/throttle.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/util/throttle.c
44
+++ b/util/throttle.c
45
@@ -XXX,XX +XXX,XX @@ static bool throttle_compute_timer(ThrottleState *ts,
46
void throttle_timers_attach_aio_context(ThrottleTimers *tt,
47
AioContext *new_context)
48
{
49
- tt->timers[0] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
50
- tt->read_timer_cb, tt->timer_opaque);
51
- tt->timers[1] = aio_timer_new(new_context, tt->clock_type, SCALE_NS,
52
- tt->write_timer_cb, tt->timer_opaque);
53
+ tt->timers[THROTTLE_READ] =
54
+ aio_timer_new(new_context, tt->clock_type, SCALE_NS,
55
+ tt->timer_cb[THROTTLE_READ], tt->timer_opaque);
56
+ tt->timers[THROTTLE_WRITE] =
57
+ aio_timer_new(new_context, tt->clock_type, SCALE_NS,
58
+ tt->timer_cb[THROTTLE_WRITE], tt->timer_opaque);
59
}
60
61
/*
62
@@ -XXX,XX +XXX,XX @@ void throttle_timers_init(ThrottleTimers *tt,
63
memset(tt, 0, sizeof(ThrottleTimers));
64
65
tt->clock_type = clock_type;
66
- tt->read_timer_cb = read_timer_cb;
67
- tt->write_timer_cb = write_timer_cb;
68
+ tt->timer_cb[THROTTLE_READ] = read_timer_cb;
69
+ tt->timer_cb[THROTTLE_WRITE] = write_timer_cb;
70
tt->timer_opaque = timer_opaque;
71
throttle_timers_attach_aio_context(tt, aio_context);
72
}
73
@@ -XXX,XX +XXX,XX @@ void throttle_timers_detach_aio_context(ThrottleTimers *tt)
74
{
75
int i;
76
77
- for (i = 0; i < 2; i++) {
78
+ for (i = 0; i < THROTTLE_MAX; i++) {
79
throttle_timer_destroy(&tt->timers[i]);
80
}
81
}
82
--
83
2.41.0
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
Use enum ThrottleDirection instead in the throttle test codes.
4
5
Reviewed-by: Alberto Garcia <berto@igalia.com>
6
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
7
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
8
Message-Id: <20230728022006.1098509-3-pizhenwei@bytedance.com>
9
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
10
---
11
tests/unit/test-throttle.c | 6 +++---
12
1 file changed, 3 insertions(+), 3 deletions(-)
13
14
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/unit/test-throttle.c
17
+++ b/tests/unit/test-throttle.c
18
@@ -XXX,XX +XXX,XX @@ static void test_init(void)
19
20
/* check initialized fields */
21
g_assert(tt->clock_type == QEMU_CLOCK_VIRTUAL);
22
- g_assert(tt->timers[0]);
23
- g_assert(tt->timers[1]);
24
+ g_assert(tt->timers[THROTTLE_READ]);
25
+ g_assert(tt->timers[THROTTLE_WRITE]);
26
27
/* check other fields were cleared */
28
g_assert(!ts.previous_leak);
29
@@ -XXX,XX +XXX,XX @@ static void test_destroy(void)
30
throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
31
read_timer_cb, write_timer_cb, &ts);
32
throttle_timers_destroy(tt);
33
- for (i = 0; i < 2; i++) {
34
+ for (i = 0; i < THROTTLE_MAX; i++) {
35
g_assert(!tt->timers[i]);
36
}
37
}
38
--
39
2.41.0
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
Only one direction is necessary in several scenarios:
4
- a read-only disk
5
- operations on a device are considered as *write* only. For example,
6
encrypt/decrypt/sign/verify operations on a cryptodev use a single
7
*write* timer (read timer callback is defined, but never invoked).
8
9
Allow a single direction in throttle; this reduces memory, and the upper layer
10
does not need a dummy callback any more.
11
12
Reviewed-by: Alberto Garcia <berto@igalia.com>
13
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
14
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
15
Message-Id: <20230728022006.1098509-4-pizhenwei@bytedance.com>
16
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
17
---
18
util/throttle.c | 42 ++++++++++++++++++++++++++++--------------
19
1 file changed, 28 insertions(+), 14 deletions(-)
20
21
diff --git a/util/throttle.c b/util/throttle.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/util/throttle.c
24
+++ b/util/throttle.c
25
@@ -XXX,XX +XXX,XX @@ static bool throttle_compute_timer(ThrottleState *ts,
26
void throttle_timers_attach_aio_context(ThrottleTimers *tt,
27
AioContext *new_context)
28
{
29
- tt->timers[THROTTLE_READ] =
30
- aio_timer_new(new_context, tt->clock_type, SCALE_NS,
31
- tt->timer_cb[THROTTLE_READ], tt->timer_opaque);
32
- tt->timers[THROTTLE_WRITE] =
33
- aio_timer_new(new_context, tt->clock_type, SCALE_NS,
34
- tt->timer_cb[THROTTLE_WRITE], tt->timer_opaque);
35
+ ThrottleDirection dir;
36
+
37
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
38
+ if (tt->timer_cb[dir]) {
39
+ tt->timers[dir] =
40
+ aio_timer_new(new_context, tt->clock_type, SCALE_NS,
41
+ tt->timer_cb[dir], tt->timer_opaque);
42
+ }
43
+ }
44
}
45
46
/*
47
@@ -XXX,XX +XXX,XX @@ void throttle_timers_init(ThrottleTimers *tt,
48
QEMUTimerCB *write_timer_cb,
49
void *timer_opaque)
50
{
51
+ assert(read_timer_cb || write_timer_cb);
52
memset(tt, 0, sizeof(ThrottleTimers));
53
54
tt->clock_type = clock_type;
55
@@ -XXX,XX +XXX,XX @@ void throttle_timers_init(ThrottleTimers *tt,
56
/* destroy a timer */
57
static void throttle_timer_destroy(QEMUTimer **timer)
58
{
59
- assert(*timer != NULL);
60
+ if (*timer == NULL) {
61
+ return;
62
+ }
63
64
timer_free(*timer);
65
*timer = NULL;
66
@@ -XXX,XX +XXX,XX @@ static void throttle_timer_destroy(QEMUTimer **timer)
67
/* Remove timers from event loop */
68
void throttle_timers_detach_aio_context(ThrottleTimers *tt)
69
{
70
- int i;
71
+ ThrottleDirection dir;
72
73
- for (i = 0; i < THROTTLE_MAX; i++) {
74
- throttle_timer_destroy(&tt->timers[i]);
75
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
76
+ throttle_timer_destroy(&tt->timers[dir]);
77
}
78
}
79
80
@@ -XXX,XX +XXX,XX @@ void throttle_timers_destroy(ThrottleTimers *tt)
81
/* is any throttling timer configured */
82
bool throttle_timers_are_initialized(ThrottleTimers *tt)
83
{
84
- if (tt->timers[0]) {
85
- return true;
86
+ ThrottleDirection dir;
87
+
88
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
89
+ if (tt->timers[dir]) {
90
+ return true;
91
+ }
92
}
93
94
return false;
95
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
96
{
97
int64_t now = qemu_clock_get_ns(tt->clock_type);
98
int64_t next_timestamp;
99
+ QEMUTimer *timer;
100
bool must_wait;
101
102
+ timer = is_write ? tt->timers[THROTTLE_WRITE] : tt->timers[THROTTLE_READ];
103
+ assert(timer);
104
+
105
must_wait = throttle_compute_timer(ts,
106
is_write,
107
now,
108
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
109
}
110
111
/* request throttled and timer pending -> do nothing */
112
- if (timer_pending(tt->timers[is_write])) {
113
+ if (timer_pending(timer)) {
114
return true;
115
}
116
117
/* request throttled and timer not pending -> arm timer */
118
- timer_mod(tt->timers[is_write], next_timestamp);
119
+ timer_mod(timer, next_timestamp);
120
return true;
121
}
122
123
--
124
2.41.0
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
Reviewed-by: Alberto Garcia <berto@igalia.com>
4
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
5
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
6
Message-Id: <20230728022006.1098509-5-pizhenwei@bytedance.com>
7
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
8
---
9
tests/unit/test-throttle.c | 66 ++++++++++++++++++++++++++++++++++++++
10
1 file changed, 66 insertions(+)
11
12
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tests/unit/test-throttle.c
15
+++ b/tests/unit/test-throttle.c
16
@@ -XXX,XX +XXX,XX @@ static void test_init(void)
17
throttle_timers_destroy(tt);
18
}
19
20
+static void test_init_readonly(void)
21
+{
22
+ int i;
23
+
24
+ tt = &tgm.throttle_timers;
25
+
26
+ /* fill the structures with crap */
27
+ memset(&ts, 1, sizeof(ts));
28
+ memset(tt, 1, sizeof(*tt));
29
+
30
+ /* init structures */
31
+ throttle_init(&ts);
32
+ throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
33
+ read_timer_cb, NULL, &ts);
34
+
35
+ /* check initialized fields */
36
+ g_assert(tt->clock_type == QEMU_CLOCK_VIRTUAL);
37
+ g_assert(tt->timers[THROTTLE_READ]);
38
+ g_assert(!tt->timers[THROTTLE_WRITE]);
39
+
40
+ /* check other fields were cleared */
41
+ g_assert(!ts.previous_leak);
42
+ g_assert(!ts.cfg.op_size);
43
+ for (i = 0; i < BUCKETS_COUNT; i++) {
44
+ g_assert(!ts.cfg.buckets[i].avg);
45
+ g_assert(!ts.cfg.buckets[i].max);
46
+ g_assert(!ts.cfg.buckets[i].level);
47
+ }
48
+
49
+ throttle_timers_destroy(tt);
50
+}
51
+
52
+static void test_init_writeonly(void)
53
+{
54
+ int i;
55
+
56
+ tt = &tgm.throttle_timers;
57
+
58
+ /* fill the structures with crap */
59
+ memset(&ts, 1, sizeof(ts));
60
+ memset(tt, 1, sizeof(*tt));
61
+
62
+ /* init structures */
63
+ throttle_init(&ts);
64
+ throttle_timers_init(tt, ctx, QEMU_CLOCK_VIRTUAL,
65
+ NULL, write_timer_cb, &ts);
66
+
67
+ /* check initialized fields */
68
+ g_assert(tt->clock_type == QEMU_CLOCK_VIRTUAL);
69
+ g_assert(!tt->timers[THROTTLE_READ]);
70
+ g_assert(tt->timers[THROTTLE_WRITE]);
71
+
72
+ /* check other fields were cleared */
73
+ g_assert(!ts.previous_leak);
74
+ g_assert(!ts.cfg.op_size);
75
+ for (i = 0; i < BUCKETS_COUNT; i++) {
76
+ g_assert(!ts.cfg.buckets[i].avg);
77
+ g_assert(!ts.cfg.buckets[i].max);
78
+ g_assert(!ts.cfg.buckets[i].level);
79
+ }
80
+
81
+ throttle_timers_destroy(tt);
82
+}
83
+
84
static void test_destroy(void)
85
{
86
int i;
87
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
88
g_test_add_func("/throttle/leak_bucket", test_leak_bucket);
89
g_test_add_func("/throttle/compute_wait", test_compute_wait);
90
g_test_add_func("/throttle/init", test_init);
91
+ g_test_add_func("/throttle/init_readonly", test_init_readonly);
92
+ g_test_add_func("/throttle/init_writeonly", test_init_writeonly);
93
g_test_add_func("/throttle/destroy", test_destroy);
94
g_test_add_func("/throttle/have_timer", test_have_timer);
95
g_test_add_func("/throttle/detach_attach", test_detach_attach);
96
--
97
2.41.0
diff view generated by jsdifflib
1
From: Peter Maydell <peter.maydell@linaro.org>
1
From: zhenwei pi <pizhenwei@bytedance.com>
2
2
3
In qemu_gluster_parse_json(), the call to qdict_array_entries()
3
Operations on a cryptodev are considered as *write* only, the callback
4
could return a negative error code, which we were ignoring
4
of read direction is never invoked. Use NULL instead of an unreachable
5
because we assigned the result to an unsigned variable.
5
path(cryptodev_backend_throttle_timer_cb on read direction).
6
Fix this by using the 'int' type instead, which matches the
7
return type of qdict_array_entries() and also the type
8
we use for the loop enumeration variable 'i'.
9
6
10
(Spotted by Coverity, CID 1360960.)
7
The dummy read timer (never invoked) is already removed here, which means
8
that the 'FIXME' tag is no longer needed.
11
9
12
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
10
Reviewed-by: Alberto Garcia <berto@igalia.com>
13
Reviewed-by: Eric Blake <eblake@redhat.com>
11
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
14
Reviewed-by: Jeff Cody <jcody@redhat.com>
12
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
15
Message-id: 1496682098-1540-1-git-send-email-peter.maydell@linaro.org
13
Message-Id: <20230728022006.1098509-6-pizhenwei@bytedance.com>
16
Signed-off-by: Jeff Cody <jcody@redhat.com>
14
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
17
---
15
---
18
block/gluster.c | 3 +--
16
backends/cryptodev.c | 3 +--
19
1 file changed, 1 insertion(+), 2 deletions(-)
17
1 file changed, 1 insertion(+), 2 deletions(-)
20
18
21
diff --git a/block/gluster.c b/block/gluster.c
19
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
22
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
23
--- a/block/gluster.c
21
--- a/backends/cryptodev.c
24
+++ b/block/gluster.c
22
+++ b/backends/cryptodev.c
25
@@ -XXX,XX +XXX,XX @@ static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
23
@@ -XXX,XX +XXX,XX @@ static void cryptodev_backend_set_throttle(CryptoDevBackend *backend, int field,
26
Error *local_err = NULL;
24
if (!enabled) {
27
char *str = NULL;
25
throttle_init(&backend->ts);
28
const char *ptr;
26
throttle_timers_init(&backend->tt, qemu_get_aio_context(),
29
- size_t num_servers;
27
- QEMU_CLOCK_REALTIME,
30
- int i, type;
28
- cryptodev_backend_throttle_timer_cb, /* FIXME */
31
+ int i, type, num_servers;
29
+ QEMU_CLOCK_REALTIME, NULL,
32
30
cryptodev_backend_throttle_timer_cb, backend);
33
/* create opts info from runtime_json_opts list */
31
}
34
opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
32
35
--
33
--
36
2.9.3
34
2.41.0
37
38
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
enum ThrottleDirection is already there, use ThrottleDirection instead
4
of 'bool is_write' for throttle API, also modify related codes from
5
block, fsdev, cryptodev and tests.
6
7
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
8
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
9
Message-Id: <20230728022006.1098509-7-pizhenwei@bytedance.com>
10
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
11
---
12
include/qemu/throttle.h | 5 +++--
13
backends/cryptodev.c | 9 +++++----
14
block/throttle-groups.c | 6 ++++--
15
fsdev/qemu-fsdev-throttle.c | 8 +++++---
16
tests/unit/test-throttle.c | 4 ++--
17
util/throttle.c | 31 +++++++++++++++++--------------
18
6 files changed, 36 insertions(+), 27 deletions(-)
19
20
diff --git a/include/qemu/throttle.h b/include/qemu/throttle.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/qemu/throttle.h
23
+++ b/include/qemu/throttle.h
24
@@ -XXX,XX +XXX,XX @@ void throttle_config_init(ThrottleConfig *cfg);
25
/* usage */
26
bool throttle_schedule_timer(ThrottleState *ts,
27
ThrottleTimers *tt,
28
- bool is_write);
29
+ ThrottleDirection direction);
30
31
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size);
32
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
33
+ uint64_t size);
34
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
35
Error **errp);
36
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var);
37
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/backends/cryptodev.c
40
+++ b/backends/cryptodev.c
41
@@ -XXX,XX +XXX,XX @@ static void cryptodev_backend_throttle_timer_cb(void *opaque)
42
continue;
43
}
44
45
- throttle_account(&backend->ts, true, ret);
46
+ throttle_account(&backend->ts, THROTTLE_WRITE, ret);
47
cryptodev_backend_operation(backend, op_info);
48
if (throttle_enabled(&backend->tc) &&
49
- throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
50
+ throttle_schedule_timer(&backend->ts, &backend->tt,
51
+ THROTTLE_WRITE)) {
52
break;
53
}
54
}
55
@@ -XXX,XX +XXX,XX @@ int cryptodev_backend_crypto_operation(
56
goto do_account;
57
}
58
59
- if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
60
+ if (throttle_schedule_timer(&backend->ts, &backend->tt, THROTTLE_WRITE) ||
61
!QTAILQ_EMPTY(&backend->opinfos)) {
62
QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
63
return 0;
64
@@ -XXX,XX +XXX,XX @@ do_account:
65
return ret;
66
}
67
68
- throttle_account(&backend->ts, true, ret);
69
+ throttle_account(&backend->ts, THROTTLE_WRITE, ret);
70
71
return cryptodev_backend_operation(backend, op_info);
72
}
73
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/block/throttle-groups.c
76
+++ b/block/throttle-groups.c
77
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
78
ThrottleState *ts = tgm->throttle_state;
79
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
80
ThrottleTimers *tt = &tgm->throttle_timers;
81
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
82
bool must_wait;
83
84
if (qatomic_read(&tgm->io_limits_disabled)) {
85
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
86
return true;
87
}
88
89
- must_wait = throttle_schedule_timer(ts, tt, is_write);
90
+ must_wait = throttle_schedule_timer(ts, tt, direction);
91
92
/* If a timer just got armed, set tgm as the current token */
93
if (must_wait) {
94
@@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm
95
bool must_wait;
96
ThrottleGroupMember *token;
97
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
98
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
99
100
assert(bytes >= 0);
101
102
@@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm
103
}
104
105
/* The I/O will be executed, so do the accounting */
106
- throttle_account(tgm->throttle_state, is_write, bytes);
107
+ throttle_account(tgm->throttle_state, direction, bytes);
108
109
/* Schedule the next request */
110
schedule_next_request(tgm, is_write);
111
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/fsdev/qemu-fsdev-throttle.c
114
+++ b/fsdev/qemu-fsdev-throttle.c
115
@@ -XXX,XX +XXX,XX @@ void fsdev_throttle_init(FsThrottle *fst)
116
void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
117
struct iovec *iov, int iovcnt)
118
{
119
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
120
+
121
if (throttle_enabled(&fst->cfg)) {
122
- if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
123
+ if (throttle_schedule_timer(&fst->ts, &fst->tt, direction) ||
124
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
125
qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
126
}
127
128
- throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
129
+ throttle_account(&fst->ts, direction, iov_size(iov, iovcnt));
130
131
if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
132
- !throttle_schedule_timer(&fst->ts, &fst->tt, is_write)) {
133
+ !throttle_schedule_timer(&fst->ts, &fst->tt, direction)) {
134
qemu_co_queue_next(&fst->throttled_reqs[is_write]);
135
}
136
}
137
diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c
138
index XXXXXXX..XXXXXXX 100644
139
--- a/tests/unit/test-throttle.c
140
+++ b/tests/unit/test-throttle.c
141
@@ -XXX,XX +XXX,XX @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
142
throttle_config(&ts, QEMU_CLOCK_VIRTUAL, &cfg);
143
144
/* account a read */
145
- throttle_account(&ts, false, size);
146
+ throttle_account(&ts, THROTTLE_READ, size);
147
/* account a write */
148
- throttle_account(&ts, true, size);
149
+ throttle_account(&ts, THROTTLE_WRITE, size);
150
151
/* check total result */
152
index = to_test[is_ops][0];
153
diff --git a/util/throttle.c b/util/throttle.c
154
index XXXXXXX..XXXXXXX 100644
155
--- a/util/throttle.c
156
+++ b/util/throttle.c
157
@@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
158
159
/* This function compute the time that must be waited while this IO
160
*
161
- * @is_write: true if the current IO is a write, false if it's a read
162
+ * @direction: throttle direction
163
* @ret: time to wait
164
*/
165
static int64_t throttle_compute_wait_for(ThrottleState *ts,
166
- bool is_write)
167
+ ThrottleDirection direction)
168
{
169
BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
170
THROTTLE_OPS_TOTAL,
171
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
172
int i;
173
174
for (i = 0; i < 4; i++) {
175
- BucketType index = to_check[is_write][i];
176
+ BucketType index = to_check[direction][i];
177
wait = throttle_compute_wait(&ts->cfg.buckets[index]);
178
if (wait > max_wait) {
179
max_wait = wait;
180
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
181
182
/* compute the timer for this type of operation
183
*
184
- * @is_write: the type of operation
185
+ * @direction: throttle direction
186
* @now: the current clock timestamp
187
* @next_timestamp: the resulting timer
188
* @ret: true if a timer must be set
189
*/
190
static bool throttle_compute_timer(ThrottleState *ts,
191
- bool is_write,
192
+ ThrottleDirection direction,
193
int64_t now,
194
int64_t *next_timestamp)
195
{
196
@@ -XXX,XX +XXX,XX @@ static bool throttle_compute_timer(ThrottleState *ts,
197
throttle_do_leak(ts, now);
198
199
/* compute the wait time if any */
200
- wait = throttle_compute_wait_for(ts, is_write);
201
+ wait = throttle_compute_wait_for(ts, direction);
202
203
/* if the code must wait compute when the next timer should fire */
204
if (wait) {
205
@@ -XXX,XX +XXX,XX @@ void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
206
* NOTE: this function is not unit tested due to its usage of timer_mod
207
*
208
* @tt: the timers structure
209
- * @is_write: the type of operation (read/write)
210
+ * @direction: throttle direction
211
* @ret: true if the timer has been scheduled else false
212
*/
213
bool throttle_schedule_timer(ThrottleState *ts,
214
ThrottleTimers *tt,
215
- bool is_write)
216
+ ThrottleDirection direction)
217
{
218
int64_t now = qemu_clock_get_ns(tt->clock_type);
219
int64_t next_timestamp;
220
QEMUTimer *timer;
221
bool must_wait;
222
223
- timer = is_write ? tt->timers[THROTTLE_WRITE] : tt->timers[THROTTLE_READ];
224
+ assert(direction < THROTTLE_MAX);
225
+ timer = tt->timers[direction];
226
assert(timer);
227
228
must_wait = throttle_compute_timer(ts,
229
- is_write,
230
+ direction,
231
now,
232
&next_timestamp);
233
234
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
235
236
/* do the accounting for this operation
237
*
238
- * @is_write: the type of operation (read/write)
239
+ * @direction: throttle direction
240
* @size: the size of the operation
241
*/
242
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
243
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
244
+ uint64_t size)
245
{
246
const BucketType bucket_types_size[2][2] = {
247
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
248
@@ -XXX,XX +XXX,XX @@ void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
249
double units = 1.0;
250
unsigned i;
251
252
+ assert(direction < THROTTLE_MAX);
253
/* if cfg.op_size is defined and smaller than size we compute unit count */
254
if (ts->cfg.op_size && size > ts->cfg.op_size) {
255
units = (double) size / ts->cfg.op_size;
256
@@ -XXX,XX +XXX,XX @@ void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
257
for (i = 0; i < 2; i++) {
258
LeakyBucket *bkt;
259
260
- bkt = &ts->cfg.buckets[bucket_types_size[is_write][i]];
261
+ bkt = &ts->cfg.buckets[bucket_types_size[direction][i]];
262
bkt->level += size;
263
if (bkt->burst_length > 1) {
264
bkt->burst_level += size;
265
}
266
267
- bkt = &ts->cfg.buckets[bucket_types_units[is_write][i]];
268
+ bkt = &ts->cfg.buckets[bucket_types_units[direction][i]];
269
bkt->level += units;
270
if (bkt->burst_length > 1) {
271
bkt->burst_level += units;
272
--
273
2.41.0
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
The first dimension of both to_check and
4
bucket_types_size/bucket_types_units is used as throttle direction,
5
use THROTTLE_MAX instead of hard coded number. Also use ARRAY_SIZE()
6
to avoid hard coded number for the second dimension.
7
8
Hanna noticed that the two arrays should be static. Yes, turn them
9
into static variables.
10
11
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
12
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
13
Message-Id: <20230728022006.1098509-8-pizhenwei@bytedance.com>
14
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
15
---
16
util/throttle.c | 11 ++++++-----
17
1 file changed, 6 insertions(+), 5 deletions(-)
18
19
diff --git a/util/throttle.c b/util/throttle.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/util/throttle.c
22
+++ b/util/throttle.c
23
@@ -XXX,XX +XXX,XX @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
24
static int64_t throttle_compute_wait_for(ThrottleState *ts,
25
ThrottleDirection direction)
26
{
27
- BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
28
+ static const BucketType to_check[THROTTLE_MAX][4] = {
29
+ {THROTTLE_BPS_TOTAL,
30
THROTTLE_OPS_TOTAL,
31
THROTTLE_BPS_READ,
32
THROTTLE_OPS_READ},
33
@@ -XXX,XX +XXX,XX @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
34
int64_t wait, max_wait = 0;
35
int i;
36
37
- for (i = 0; i < 4; i++) {
38
+ for (i = 0; i < ARRAY_SIZE(to_check[THROTTLE_READ]); i++) {
39
BucketType index = to_check[direction][i];
40
wait = throttle_compute_wait(&ts->cfg.buckets[index]);
41
if (wait > max_wait) {
42
@@ -XXX,XX +XXX,XX @@ bool throttle_schedule_timer(ThrottleState *ts,
43
void throttle_account(ThrottleState *ts, ThrottleDirection direction,
44
uint64_t size)
45
{
46
- const BucketType bucket_types_size[2][2] = {
47
+ static const BucketType bucket_types_size[THROTTLE_MAX][2] = {
48
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
49
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_WRITE }
50
};
51
- const BucketType bucket_types_units[2][2] = {
52
+ static const BucketType bucket_types_units[THROTTLE_MAX][2] = {
53
{ THROTTLE_OPS_TOTAL, THROTTLE_OPS_READ },
54
{ THROTTLE_OPS_TOTAL, THROTTLE_OPS_WRITE }
55
};
56
@@ -XXX,XX +XXX,XX @@ void throttle_account(ThrottleState *ts, ThrottleDirection direction,
57
units = (double) size / ts->cfg.op_size;
58
}
59
60
- for (i = 0; i < 2; i++) {
61
+ for (i = 0; i < ARRAY_SIZE(bucket_types_size[THROTTLE_READ]); i++) {
62
LeakyBucket *bkt;
63
64
bkt = &ts->cfg.buckets[bucket_types_size[direction][i]];
65
--
66
2.41.0
diff view generated by jsdifflib
New patch
1
From: zhenwei pi <pizhenwei@bytedance.com>
1
2
3
'bool is_write' style is obsolete in the throttle framework; adapt
4
fsdev to the new style.
5
6
Cc: Greg Kurz <groug@kaod.org>
7
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
8
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
9
Message-Id: <20230728022006.1098509-9-pizhenwei@bytedance.com>
10
Reviewed-by: Greg Kurz <groug@kaod.org>
11
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
12
---
13
fsdev/qemu-fsdev-throttle.h | 4 ++--
14
fsdev/qemu-fsdev-throttle.c | 14 +++++++-------
15
hw/9pfs/cofile.c | 4 ++--
16
3 files changed, 11 insertions(+), 11 deletions(-)
17
18
diff --git a/fsdev/qemu-fsdev-throttle.h b/fsdev/qemu-fsdev-throttle.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/fsdev/qemu-fsdev-throttle.h
21
+++ b/fsdev/qemu-fsdev-throttle.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct FsThrottle {
23
ThrottleState ts;
24
ThrottleTimers tt;
25
ThrottleConfig cfg;
26
- CoQueue throttled_reqs[2];
27
+ CoQueue throttled_reqs[THROTTLE_MAX];
28
} FsThrottle;
29
30
int fsdev_throttle_parse_opts(QemuOpts *, FsThrottle *, Error **);
31
32
void fsdev_throttle_init(FsThrottle *);
33
34
-void coroutine_fn fsdev_co_throttle_request(FsThrottle *, bool ,
35
+void coroutine_fn fsdev_co_throttle_request(FsThrottle *, ThrottleDirection ,
36
struct iovec *, int);
37
38
void fsdev_throttle_cleanup(FsThrottle *);
39
diff --git a/fsdev/qemu-fsdev-throttle.c b/fsdev/qemu-fsdev-throttle.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/fsdev/qemu-fsdev-throttle.c
42
+++ b/fsdev/qemu-fsdev-throttle.c
43
@@ -XXX,XX +XXX,XX @@ void fsdev_throttle_init(FsThrottle *fst)
44
}
45
}
46
47
-void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
48
+void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst,
49
+ ThrottleDirection direction,
50
struct iovec *iov, int iovcnt)
51
{
52
- ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
53
-
54
+ assert(direction < THROTTLE_MAX);
55
if (throttle_enabled(&fst->cfg)) {
56
if (throttle_schedule_timer(&fst->ts, &fst->tt, direction) ||
57
- !qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
58
- qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
59
+ !qemu_co_queue_empty(&fst->throttled_reqs[direction])) {
60
+ qemu_co_queue_wait(&fst->throttled_reqs[direction], NULL);
61
}
62
63
throttle_account(&fst->ts, direction, iov_size(iov, iovcnt));
64
65
- if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
66
+ if (!qemu_co_queue_empty(&fst->throttled_reqs[direction]) &&
67
!throttle_schedule_timer(&fst->ts, &fst->tt, direction)) {
68
- qemu_co_queue_next(&fst->throttled_reqs[is_write]);
69
+ qemu_co_queue_next(&fst->throttled_reqs[direction]);
70
}
71
}
72
}
73
diff --git a/hw/9pfs/cofile.c b/hw/9pfs/cofile.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/hw/9pfs/cofile.c
76
+++ b/hw/9pfs/cofile.c
77
@@ -XXX,XX +XXX,XX @@ int coroutine_fn v9fs_co_pwritev(V9fsPDU *pdu, V9fsFidState *fidp,
78
if (v9fs_request_cancelled(pdu)) {
79
return -EINTR;
80
}
81
- fsdev_co_throttle_request(s->ctx.fst, true, iov, iovcnt);
82
+ fsdev_co_throttle_request(s->ctx.fst, THROTTLE_WRITE, iov, iovcnt);
83
v9fs_co_run_in_worker(
84
{
85
err = s->ops->pwritev(&s->ctx, &fidp->fs, iov, iovcnt, offset);
86
@@ -XXX,XX +XXX,XX @@ int coroutine_fn v9fs_co_preadv(V9fsPDU *pdu, V9fsFidState *fidp,
87
if (v9fs_request_cancelled(pdu)) {
88
return -EINTR;
89
}
90
- fsdev_co_throttle_request(s->ctx.fst, false, iov, iovcnt);
91
+ fsdev_co_throttle_request(s->ctx.fst, THROTTLE_READ, iov, iovcnt);
92
v9fs_co_run_in_worker(
93
{
94
err = s->ops->preadv(&s->ctx, &fidp->fs, iov, iovcnt, offset);
95
--
96
2.41.0
diff view generated by jsdifflib
New patch
1
1
From: zhenwei pi <pizhenwei@bytedance.com>
2
3
'bool is_write' style is obsolete from throttle framework, adapt
4
block throttle groups to the new style:
5
- use ThrottleDirection instead of 'bool is_write'. Ex,
6
schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
7
-> schedule_next_request(ThrottleGroupMember *tgm, ThrottleDirection direction)
8
9
- use THROTTLE_MAX instead of hard code. Ex, ThrottleGroupMember *tokens[2]
10
-> ThrottleGroupMember *tokens[THROTTLE_MAX]
11
12
- use ThrottleDirection instead of hard code on iteration. Ex, (i = 0; i < 2; i++)
13
-> for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++)
14
15
Use a simple python script to test the new style:
16
#!/usr/bin/python3
17
import subprocess
18
import random
19
import time
20
21
commands = ['virsh blkdeviotune jammy vda --write-bytes-sec ', \
22
'virsh blkdeviotune jammy vda --write-iops-sec ', \
23
'virsh blkdeviotune jammy vda --read-bytes-sec ', \
24
'virsh blkdeviotune jammy vda --read-iops-sec ']
25
26
for loop in range(1, 1000):
27
time.sleep(random.randrange(3, 5))
28
command = commands[random.randrange(0, 3)] + str(random.randrange(0, 1000000))
29
subprocess.run(command, shell=True, check=True)
30
31
This works fine.
32
33
Signed-off-by: zhenwei pi <pizhenwei@bytedance.com>
34
Message-Id: <20230728022006.1098509-10-pizhenwei@bytedance.com>
35
Reviewed-by: Hanna Czenczek <hreitz@redhat.com>
36
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
37
---
38
include/block/throttle-groups.h | 6 +-
39
block/block-backend.c | 4 +-
40
block/throttle-groups.c | 161 ++++++++++++++++----------------
41
block/throttle.c | 8 +-
42
4 files changed, 90 insertions(+), 89 deletions(-)
43
44
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/include/block/throttle-groups.h
47
+++ b/include/block/throttle-groups.h
48
@@ -XXX,XX +XXX,XX @@ typedef struct ThrottleGroupMember {
49
AioContext *aio_context;
50
/* throttled_reqs_lock protects the CoQueues for throttled requests. */
51
CoMutex throttled_reqs_lock;
52
- CoQueue throttled_reqs[2];
53
+ CoQueue throttled_reqs[THROTTLE_MAX];
54
55
/* Nonzero if the I/O limits are currently being ignored; generally
56
* it is zero. Accessed with atomic operations.
57
@@ -XXX,XX +XXX,XX @@ typedef struct ThrottleGroupMember {
58
* throttle_state tells us if I/O limits are configured. */
59
ThrottleState *throttle_state;
60
ThrottleTimers throttle_timers;
61
- unsigned pending_reqs[2];
62
+ unsigned pending_reqs[THROTTLE_MAX];
63
QLIST_ENTRY(ThrottleGroupMember) round_robin;
64
65
} ThrottleGroupMember;
66
@@ -XXX,XX +XXX,XX @@ void throttle_group_restart_tgm(ThrottleGroupMember *tgm);
67
68
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
69
int64_t bytes,
70
- bool is_write);
71
+ ThrottleDirection direction);
72
void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
73
AioContext *new_context);
74
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm);
75
diff --git a/block/block-backend.c b/block/block-backend.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/block/block-backend.c
78
+++ b/block/block-backend.c
79
@@ -XXX,XX +XXX,XX @@ blk_co_do_preadv_part(BlockBackend *blk, int64_t offset, int64_t bytes,
80
/* throttling disk I/O */
81
if (blk->public.throttle_group_member.throttle_state) {
82
throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
83
- bytes, false);
84
+ bytes, THROTTLE_READ);
85
}
86
87
ret = bdrv_co_preadv_part(blk->root, offset, bytes, qiov, qiov_offset,
88
@@ -XXX,XX +XXX,XX @@ blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes,
89
/* throttling disk I/O */
90
if (blk->public.throttle_group_member.throttle_state) {
91
throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
92
- bytes, true);
93
+ bytes, THROTTLE_WRITE);
94
}
95
96
if (!blk->enable_write_cache) {
97
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/block/throttle-groups.c
100
+++ b/block/throttle-groups.c
101
@@ -XXX,XX +XXX,XX @@
102
103
static void throttle_group_obj_init(Object *obj);
104
static void throttle_group_obj_complete(UserCreatable *obj, Error **errp);
105
-static void timer_cb(ThrottleGroupMember *tgm, bool is_write);
106
+static void timer_cb(ThrottleGroupMember *tgm, ThrottleDirection direction);
107
108
/* The ThrottleGroup structure (with its ThrottleState) is shared
109
* among different ThrottleGroupMembers and it's independent from
110
@@ -XXX,XX +XXX,XX @@ struct ThrottleGroup {
111
QemuMutex lock; /* This lock protects the following four fields */
112
ThrottleState ts;
113
QLIST_HEAD(, ThrottleGroupMember) head;
114
- ThrottleGroupMember *tokens[2];
115
- bool any_timer_armed[2];
116
+ ThrottleGroupMember *tokens[THROTTLE_MAX];
117
+ bool any_timer_armed[THROTTLE_MAX];
118
QEMUClockType clock_type;
119
120
/* This field is protected by the global QEMU mutex */
121
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
122
* This assumes that tg->lock is held.
123
*
124
* @tgm: the ThrottleGroupMember
125
- * @is_write: the type of operation (read/write)
126
+ * @direction: the ThrottleDirection
127
* @ret: whether the ThrottleGroupMember has pending requests.
128
*/
129
static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
130
- bool is_write)
131
+ ThrottleDirection direction)
132
{
133
- return tgm->pending_reqs[is_write];
134
+ return tgm->pending_reqs[direction];
135
}
136
137
/* Return the next ThrottleGroupMember in the round-robin sequence with pending
138
@@ -XXX,XX +XXX,XX @@ static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
139
* This assumes that tg->lock is held.
140
*
141
* @tgm: the current ThrottleGroupMember
142
- * @is_write: the type of operation (read/write)
143
+ * @direction: the ThrottleDirection
144
* @ret: the next ThrottleGroupMember with pending requests, or tgm if
145
* there is none.
146
*/
147
static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
148
- bool is_write)
149
+ ThrottleDirection direction)
150
{
151
ThrottleState *ts = tgm->throttle_state;
152
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
153
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
154
* it's being drained. Skip the round-robin search and return tgm
155
* immediately if it has pending requests. Otherwise we could be
156
* forcing it to wait for other member's throttled requests. */
157
- if (tgm_has_pending_reqs(tgm, is_write) &&
158
+ if (tgm_has_pending_reqs(tgm, direction) &&
159
qatomic_read(&tgm->io_limits_disabled)) {
160
return tgm;
161
}
162
163
- start = token = tg->tokens[is_write];
164
+ start = token = tg->tokens[direction];
165
166
/* get next bs round in round robin style */
167
token = throttle_group_next_tgm(token);
168
- while (token != start && !tgm_has_pending_reqs(token, is_write)) {
169
+ while (token != start && !tgm_has_pending_reqs(token, direction)) {
170
token = throttle_group_next_tgm(token);
171
}
172
173
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
174
* then decide the token is the current tgm because chances are
175
* the current tgm got the current request queued.
176
*/
177
- if (token == start && !tgm_has_pending_reqs(token, is_write)) {
178
+ if (token == start && !tgm_has_pending_reqs(token, direction)) {
179
token = tgm;
180
}
181
182
/* Either we return the original TGM, or one with pending requests */
183
- assert(token == tgm || tgm_has_pending_reqs(token, is_write));
184
+ assert(token == tgm || tgm_has_pending_reqs(token, direction));
185
186
return token;
187
}
188
@@ -XXX,XX +XXX,XX @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
189
* This assumes that tg->lock is held.
190
*
191
* @tgm: the current ThrottleGroupMember
192
- * @is_write: the type of operation (read/write)
193
+ * @direction: the ThrottleDirection
194
* @ret: whether the I/O request needs to be throttled or not
195
*/
196
static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
197
- bool is_write)
198
+ ThrottleDirection direction)
199
{
200
ThrottleState *ts = tgm->throttle_state;
201
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
202
ThrottleTimers *tt = &tgm->throttle_timers;
203
- ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
204
bool must_wait;
205
206
if (qatomic_read(&tgm->io_limits_disabled)) {
207
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
208
}
209
210
/* Check if any of the timers in this group is already armed */
211
- if (tg->any_timer_armed[is_write]) {
212
+ if (tg->any_timer_armed[direction]) {
213
return true;
214
}
215
216
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
217
218
/* If a timer just got armed, set tgm as the current token */
219
if (must_wait) {
220
- tg->tokens[is_write] = tgm;
221
- tg->any_timer_armed[is_write] = true;
222
+ tg->tokens[direction] = tgm;
223
+ tg->any_timer_armed[direction] = true;
224
}
225
226
return must_wait;
227
@@ -XXX,XX +XXX,XX @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
228
* any request was actually pending.
229
*
230
* @tgm: the current ThrottleGroupMember
231
- * @is_write: the type of operation (read/write)
232
+ * @direction: the ThrottleDirection
233
*/
234
static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
235
- bool is_write)
236
+ ThrottleDirection direction)
237
{
238
bool ret;
239
240
qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
241
- ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
242
+ ret = qemu_co_queue_next(&tgm->throttled_reqs[direction]);
243
qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
244
245
return ret;
246
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tg
247
* This assumes that tg->lock is held.
248
*
249
* @tgm: the current ThrottleGroupMember
250
- * @is_write: the type of operation (read/write)
251
+ * @direction: the ThrottleDirection
252
*/
253
-static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
254
+static void schedule_next_request(ThrottleGroupMember *tgm,
255
+ ThrottleDirection direction)
256
{
257
ThrottleState *ts = tgm->throttle_state;
258
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
259
@@ -XXX,XX +XXX,XX @@ static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
260
ThrottleGroupMember *token;
261
262
/* Check if there's any pending request to schedule next */
263
- token = next_throttle_token(tgm, is_write);
264
- if (!tgm_has_pending_reqs(token, is_write)) {
265
+ token = next_throttle_token(tgm, direction);
266
+ if (!tgm_has_pending_reqs(token, direction)) {
267
return;
268
}
269
270
/* Set a timer for the request if it needs to be throttled */
271
- must_wait = throttle_group_schedule_timer(token, is_write);
272
+ must_wait = throttle_group_schedule_timer(token, direction);
273
274
/* If it doesn't have to wait, queue it for immediate execution */
275
if (!must_wait) {
276
/* Give preference to requests from the current tgm */
277
if (qemu_in_coroutine() &&
278
- throttle_group_co_restart_queue(tgm, is_write)) {
279
+ throttle_group_co_restart_queue(tgm, direction)) {
280
token = tgm;
281
} else {
282
ThrottleTimers *tt = &token->throttle_timers;
283
int64_t now = qemu_clock_get_ns(tg->clock_type);
284
- timer_mod(tt->timers[is_write], now);
285
- tg->any_timer_armed[is_write] = true;
286
+ timer_mod(tt->timers[direction], now);
287
+ tg->any_timer_armed[direction] = true;
288
}
289
- tg->tokens[is_write] = token;
290
+ tg->tokens[direction] = token;
291
}
292
}
293
294
@@ -XXX,XX +XXX,XX @@ static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
295
*
296
* @tgm: the current ThrottleGroupMember
297
* @bytes: the number of bytes for this I/O
298
- * @is_write: the type of operation (read/write)
299
+ * @direction: the ThrottleDirection
300
*/
301
void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
302
int64_t bytes,
303
- bool is_write)
304
+ ThrottleDirection direction)
305
{
306
bool must_wait;
307
ThrottleGroupMember *token;
308
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
309
- ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
310
311
assert(bytes >= 0);
312
+ assert(direction < THROTTLE_MAX);
313
314
qemu_mutex_lock(&tg->lock);
315
316
/* First we check if this I/O has to be throttled. */
317
- token = next_throttle_token(tgm, is_write);
318
- must_wait = throttle_group_schedule_timer(token, is_write);
319
+ token = next_throttle_token(tgm, direction);
320
+ must_wait = throttle_group_schedule_timer(token, direction);
321
322
/* Wait if there's a timer set or queued requests of this type */
323
- if (must_wait || tgm->pending_reqs[is_write]) {
324
- tgm->pending_reqs[is_write]++;
325
+ if (must_wait || tgm->pending_reqs[direction]) {
326
+ tgm->pending_reqs[direction]++;
327
qemu_mutex_unlock(&tg->lock);
328
qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
329
- qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
330
+ qemu_co_queue_wait(&tgm->throttled_reqs[direction],
331
&tgm->throttled_reqs_lock);
332
qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
333
qemu_mutex_lock(&tg->lock);
334
- tgm->pending_reqs[is_write]--;
335
+ tgm->pending_reqs[direction]--;
336
}
337
338
/* The I/O will be executed, so do the accounting */
339
throttle_account(tgm->throttle_state, direction, bytes);
340
341
/* Schedule the next request */
342
- schedule_next_request(tgm, is_write);
343
+ schedule_next_request(tgm, direction);
344
345
qemu_mutex_unlock(&tg->lock);
346
}
347
348
typedef struct {
349
ThrottleGroupMember *tgm;
350
- bool is_write;
351
+ ThrottleDirection direction;
352
} RestartData;
353
354
static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
355
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
356
ThrottleGroupMember *tgm = data->tgm;
357
ThrottleState *ts = tgm->throttle_state;
358
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
359
- bool is_write = data->is_write;
360
+ ThrottleDirection direction = data->direction;
361
bool empty_queue;
362
363
- empty_queue = !throttle_group_co_restart_queue(tgm, is_write);
364
+ empty_queue = !throttle_group_co_restart_queue(tgm, direction);
365
366
/* If the request queue was empty then we have to take care of
367
* scheduling the next one */
368
if (empty_queue) {
369
qemu_mutex_lock(&tg->lock);
370
- schedule_next_request(tgm, is_write);
371
+ schedule_next_request(tgm, direction);
372
qemu_mutex_unlock(&tg->lock);
373
}
374
375
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
376
aio_wait_kick();
377
}
378
379
-static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
380
+static void throttle_group_restart_queue(ThrottleGroupMember *tgm,
381
+ ThrottleDirection direction)
382
{
383
Coroutine *co;
384
RestartData *rd = g_new0(RestartData, 1);
385
386
rd->tgm = tgm;
387
- rd->is_write = is_write;
388
+ rd->direction = direction;
389
390
/* This function is called when a timer is fired or when
391
* throttle_group_restart_tgm() is called. Either way, there can
392
* be no timer pending on this tgm at this point */
393
- assert(!timer_pending(tgm->throttle_timers.timers[is_write]));
394
+ assert(!timer_pending(tgm->throttle_timers.timers[direction]));
395
396
qatomic_inc(&tgm->restart_pending);
397
398
@@ -XXX,XX +XXX,XX @@ static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write
399
400
void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
401
{
402
- int i;
403
+ ThrottleDirection dir;
404
405
if (tgm->throttle_state) {
406
- for (i = 0; i < 2; i++) {
407
- QEMUTimer *t = tgm->throttle_timers.timers[i];
408
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
409
+ QEMUTimer *t = tgm->throttle_timers.timers[dir];
410
if (timer_pending(t)) {
411
/* If there's a pending timer on this tgm, fire it now */
412
timer_del(t);
413
- timer_cb(tgm, i);
414
+ timer_cb(tgm, dir);
415
} else {
416
/* Else run the next request from the queue manually */
417
- throttle_group_restart_queue(tgm, i);
418
+ throttle_group_restart_queue(tgm, dir);
419
}
420
}
421
}
422
@@ -XXX,XX +XXX,XX @@ void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
423
* because it had been throttled.
424
*
425
* @tgm: the ThrottleGroupMember whose request had been throttled
426
- * @is_write: the type of operation (read/write)
427
+ * @direction: the ThrottleDirection
428
*/
429
-static void timer_cb(ThrottleGroupMember *tgm, bool is_write)
430
+static void timer_cb(ThrottleGroupMember *tgm, ThrottleDirection direction)
431
{
432
ThrottleState *ts = tgm->throttle_state;
433
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
434
435
/* The timer has just been fired, so we can update the flag */
436
qemu_mutex_lock(&tg->lock);
437
- tg->any_timer_armed[is_write] = false;
438
+ tg->any_timer_armed[direction] = false;
439
qemu_mutex_unlock(&tg->lock);
440
441
/* Run the request that was waiting for this timer */
442
- throttle_group_restart_queue(tgm, is_write);
443
+ throttle_group_restart_queue(tgm, direction);
444
}
445
446
static void read_timer_cb(void *opaque)
447
{
448
- timer_cb(opaque, false);
449
+ timer_cb(opaque, THROTTLE_READ);
450
}
451
452
static void write_timer_cb(void *opaque)
453
{
454
- timer_cb(opaque, true);
455
+ timer_cb(opaque, THROTTLE_WRITE);
456
}
457
458
/* Register a ThrottleGroupMember from the throttling group, also initializing
459
@@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
460
const char *groupname,
461
AioContext *ctx)
462
{
463
- int i;
464
+ ThrottleDirection dir;
465
ThrottleState *ts = throttle_group_incref(groupname);
466
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
467
468
@@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
469
470
QEMU_LOCK_GUARD(&tg->lock);
471
/* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
472
- for (i = 0; i < 2; i++) {
473
- if (!tg->tokens[i]) {
474
- tg->tokens[i] = tgm;
475
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
476
+ if (!tg->tokens[dir]) {
477
+ tg->tokens[dir] = tgm;
478
}
479
+ qemu_co_queue_init(&tgm->throttled_reqs[dir]);
480
}
481
482
QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);
483
@@ -XXX,XX +XXX,XX @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm,
484
write_timer_cb,
485
tgm);
486
qemu_co_mutex_init(&tgm->throttled_reqs_lock);
487
- qemu_co_queue_init(&tgm->throttled_reqs[0]);
488
- qemu_co_queue_init(&tgm->throttled_reqs[1]);
489
}
490
491
/* Unregister a ThrottleGroupMember from its group, removing it from the list,
492
@@ -XXX,XX +XXX,XX @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
493
ThrottleState *ts = tgm->throttle_state;
494
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
495
ThrottleGroupMember *token;
496
- int i;
497
+ ThrottleDirection dir;
498
499
if (!ts) {
500
/* Discard already unregistered tgm */
501
@@ -XXX,XX +XXX,XX @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
502
AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0);
503
504
WITH_QEMU_LOCK_GUARD(&tg->lock) {
505
- for (i = 0; i < 2; i++) {
506
- assert(tgm->pending_reqs[i] == 0);
507
- assert(qemu_co_queue_empty(&tgm->throttled_reqs[i]));
508
- assert(!timer_pending(tgm->throttle_timers.timers[i]));
509
- if (tg->tokens[i] == tgm) {
510
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
511
+ assert(tgm->pending_reqs[dir] == 0);
512
+ assert(qemu_co_queue_empty(&tgm->throttled_reqs[dir]));
513
+ assert(!timer_pending(tgm->throttle_timers.timers[dir]));
514
+ if (tg->tokens[dir] == tgm) {
515
token = throttle_group_next_tgm(tgm);
516
/* Take care of the case where this is the last tgm in the group */
517
if (token == tgm) {
518
token = NULL;
519
}
520
- tg->tokens[i] = token;
521
+ tg->tokens[dir] = token;
522
}
523
}
524
525
@@ -XXX,XX +XXX,XX @@ void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
526
{
527
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
528
ThrottleTimers *tt = &tgm->throttle_timers;
529
- int i;
530
+ ThrottleDirection dir;
531
532
/* Requests must have been drained */
533
- assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
534
- assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
535
- assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
536
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
537
+ assert(tgm->pending_reqs[dir] == 0);
538
+ assert(qemu_co_queue_empty(&tgm->throttled_reqs[dir]));
539
+ }
540
541
/* Kick off next ThrottleGroupMember, if necessary */
542
WITH_QEMU_LOCK_GUARD(&tg->lock) {
543
- for (i = 0; i < 2; i++) {
544
- if (timer_pending(tt->timers[i])) {
545
- tg->any_timer_armed[i] = false;
546
- schedule_next_request(tgm, i);
547
+ for (dir = THROTTLE_READ; dir < THROTTLE_MAX; dir++) {
548
+ if (timer_pending(tt->timers[dir])) {
549
+ tg->any_timer_armed[dir] = false;
550
+ schedule_next_request(tgm, dir);
551
}
552
}
553
}
554
diff --git a/block/throttle.c b/block/throttle.c
555
index XXXXXXX..XXXXXXX 100644
556
--- a/block/throttle.c
557
+++ b/block/throttle.c
558
@@ -XXX,XX +XXX,XX @@ throttle_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
559
{
560
561
ThrottleGroupMember *tgm = bs->opaque;
562
- throttle_group_co_io_limits_intercept(tgm, bytes, false);
563
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_READ);
564
565
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
566
}
567
@@ -XXX,XX +XXX,XX @@ throttle_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
568
QEMUIOVector *qiov, BdrvRequestFlags flags)
569
{
570
ThrottleGroupMember *tgm = bs->opaque;
571
- throttle_group_co_io_limits_intercept(tgm, bytes, true);
572
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_WRITE);
573
574
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
575
}
576
@@ -XXX,XX +XXX,XX @@ throttle_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
577
BdrvRequestFlags flags)
578
{
579
ThrottleGroupMember *tgm = bs->opaque;
580
- throttle_group_co_io_limits_intercept(tgm, bytes, true);
581
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_WRITE);
582
583
return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags);
584
}
585
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn GRAPH_RDLOCK
586
throttle_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
587
{
588
ThrottleGroupMember *tgm = bs->opaque;
589
- throttle_group_co_io_limits_intercept(tgm, bytes, true);
590
+ throttle_group_co_io_limits_intercept(tgm, bytes, THROTTLE_WRITE);
591
592
return bdrv_co_pdiscard(bs->file, offset, bytes);
593
}
594
--
595
2.41.0
diff view generated by jsdifflib
New patch
1
bs->bl.zoned is what indicates whether the zone information is present
2
and valid; it is the only thing that raw_refresh_zoned_limits() sets if
3
CONFIG_BLKZONED is not defined, and it is also the only thing that it
4
sets if CONFIG_BLKZONED is defined, but there are no zones.
1
5
6
Make sure that it is always set to BLK_Z_NONE if there is an error
7
anywhere in raw_refresh_zoned_limits() so that we do not accidentally
8
announce zones while our information is incomplete or invalid.
9
10
This also fixes a memory leak in the last error path in
11
raw_refresh_zoned_limits().
12
13
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
14
Message-Id: <20230824155345.109765-2-hreitz@redhat.com>
15
Reviewed-by: Sam Li <faithilikerun@gmail.com>
16
---
17
block/file-posix.c | 21 ++++++++++++---------
18
1 file changed, 12 insertions(+), 9 deletions(-)
19
20
diff --git a/block/file-posix.c b/block/file-posix.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/block/file-posix.c
23
+++ b/block/file-posix.c
24
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
25
BlockZoneModel zoned;
26
int ret;
27
28
- bs->bl.zoned = BLK_Z_NONE;
29
-
30
ret = get_sysfs_zoned_model(st, &zoned);
31
if (ret < 0 || zoned == BLK_Z_NONE) {
32
- return;
33
+ goto no_zoned;
34
}
35
bs->bl.zoned = zoned;
36
37
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
38
if (ret < 0) {
39
error_setg_errno(errp, -ret, "Unable to read chunk_sectors "
40
"sysfs attribute");
41
- return;
42
+ goto no_zoned;
43
} else if (!ret) {
44
error_setg(errp, "Read 0 from chunk_sectors sysfs attribute");
45
- return;
46
+ goto no_zoned;
47
}
48
bs->bl.zone_size = ret << BDRV_SECTOR_BITS;
49
50
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
51
if (ret < 0) {
52
error_setg_errno(errp, -ret, "Unable to read nr_zones "
53
"sysfs attribute");
54
- return;
55
+ goto no_zoned;
56
} else if (!ret) {
57
error_setg(errp, "Read 0 from nr_zones sysfs attribute");
58
- return;
59
+ goto no_zoned;
60
}
61
bs->bl.nr_zones = ret;
62
63
@@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
64
ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0);
65
if (ret < 0) {
66
error_setg_errno(errp, -ret, "report wps failed");
67
- bs->wps = NULL;
68
- return;
69
+ goto no_zoned;
70
}
71
qemu_co_mutex_init(&bs->wps->colock);
72
+ return;
73
+
74
+no_zoned:
75
+ bs->bl.zoned = BLK_Z_NONE;
76
+ g_free(bs->wps);
77
+ bs->wps = NULL;
78
}
79
#else /* !defined(CONFIG_BLKZONED) */
80
static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
81
--
82
2.41.0
diff view generated by jsdifflib
New patch
1
Instead of checking bs->wps or bs->bl.zone_size for whether zone
2
information is present, check bs->bl.zoned. That is the flag that
3
raw_refresh_zoned_limits() reliably sets to indicate zone support. If
4
it is set to something other than BLK_Z_NONE, other values and objects
5
like bs->wps and bs->bl.zone_size must be non-null/zero and valid; if it
6
is not, we cannot rely on their validity.
1
7
8
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
9
Message-Id: <20230824155345.109765-3-hreitz@redhat.com>
10
Reviewed-by: Sam Li <faithilikerun@gmail.com>
11
---
12
block/file-posix.c | 12 +++++++-----
13
1 file changed, 7 insertions(+), 5 deletions(-)
14
15
diff --git a/block/file-posix.c b/block/file-posix.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/file-posix.c
18
+++ b/block/file-posix.c
19
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
20
if (fd_open(bs) < 0)
21
return -EIO;
22
#if defined(CONFIG_BLKZONED)
23
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && bs->wps) {
24
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
25
+ bs->bl.zoned != BLK_Z_NONE) {
26
qemu_co_mutex_lock(&bs->wps->colock);
27
- if (type & QEMU_AIO_ZONE_APPEND && bs->bl.zone_size) {
28
+ if (type & QEMU_AIO_ZONE_APPEND) {
29
int index = offset / bs->bl.zone_size;
30
offset = bs->wps->wp[index];
31
}
32
@@ -XXX,XX +XXX,XX @@ out:
33
{
34
BlockZoneWps *wps = bs->wps;
35
if (ret == 0) {
36
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))
37
- && wps && bs->bl.zone_size) {
38
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
39
+ bs->bl.zoned != BLK_Z_NONE) {
40
uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
41
if (!BDRV_ZT_IS_CONV(*wp)) {
42
if (type & QEMU_AIO_ZONE_APPEND) {
43
@@ -XXX,XX +XXX,XX @@ out:
44
}
45
}
46
47
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && wps) {
48
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
49
+ bs->bl.zoned != BLK_Z_NONE) {
50
qemu_co_mutex_unlock(&wps->colock);
51
}
52
}
53
--
54
2.41.0
diff view generated by jsdifflib
New patch
1
We must check that zone information is present before running
2
update_zones_wp().
1
3
4
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=2234374
5
Fixes: Coverity CID 1512459
6
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
7
Message-Id: <20230824155345.109765-4-hreitz@redhat.com>
8
Reviewed-by: Sam Li <faithilikerun@gmail.com>
9
---
10
block/file-posix.c | 3 ++-
11
1 file changed, 2 insertions(+), 1 deletion(-)
12
13
diff --git a/block/file-posix.c b/block/file-posix.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/block/file-posix.c
16
+++ b/block/file-posix.c
17
@@ -XXX,XX +XXX,XX @@ out:
18
}
19
}
20
} else {
21
- if (type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) {
22
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
23
+ bs->bl.zoned != BLK_Z_NONE) {
24
update_zones_wp(bs, s->fd, 0, 1);
25
}
26
}
27
--
28
2.41.0
diff view generated by jsdifflib
New patch
1
We duplicate the same condition three times here, pull it out to the top
2
level.
1
3
4
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
5
Message-Id: <20230824155345.109765-5-hreitz@redhat.com>
6
Reviewed-by: Sam Li <faithilikerun@gmail.com>
7
---
8
block/file-posix.c | 18 +++++-------------
9
1 file changed, 5 insertions(+), 13 deletions(-)
10
11
diff --git a/block/file-posix.c b/block/file-posix.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/block/file-posix.c
14
+++ b/block/file-posix.c
15
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
16
17
out:
18
#if defined(CONFIG_BLKZONED)
19
-{
20
- BlockZoneWps *wps = bs->wps;
21
- if (ret == 0) {
22
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
23
- bs->bl.zoned != BLK_Z_NONE) {
24
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
25
+ bs->bl.zoned != BLK_Z_NONE) {
26
+ BlockZoneWps *wps = bs->wps;
27
+ if (ret == 0) {
28
uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
29
if (!BDRV_ZT_IS_CONV(*wp)) {
30
if (type & QEMU_AIO_ZONE_APPEND) {
31
@@ -XXX,XX +XXX,XX @@ out:
32
*wp = offset + bytes;
33
}
34
}
35
- }
36
- } else {
37
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
38
- bs->bl.zoned != BLK_Z_NONE) {
39
+ } else {
40
update_zones_wp(bs, s->fd, 0, 1);
41
}
42
- }
43
44
- if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
45
- bs->bl.zoned != BLK_Z_NONE) {
46
qemu_co_mutex_unlock(&wps->colock);
47
}
48
-}
49
#endif
50
return ret;
51
}
52
--
53
2.41.0
diff view generated by jsdifflib
New patch
1
This is a regression test for
2
https://bugzilla.redhat.com/show_bug.cgi?id=2234374.
1
3
4
All this test needs to do is trigger an I/O error inside of file-posix
5
(specifically raw_co_prw()). One reliable way to do this without
6
requiring special privileges is to use a FUSE export, which allows us to
7
inject any error that we want, e.g. via blkdebug.
8
9
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
10
Message-Id: <20230824155345.109765-6-hreitz@redhat.com>
11
[hreitz: Fixed test to be skipped when there is no FUSE support, to
12
suppress fusermount's allow_other warning, and to be skipped
13
with $IMGOPTSSYNTAX enabled]
14
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
15
---
16
tests/qemu-iotests/tests/file-io-error | 119 +++++++++++++++++++++
17
tests/qemu-iotests/tests/file-io-error.out | 33 ++++++
18
2 files changed, 152 insertions(+)
19
create mode 100755 tests/qemu-iotests/tests/file-io-error
20
create mode 100644 tests/qemu-iotests/tests/file-io-error.out
21
22
diff --git a/tests/qemu-iotests/tests/file-io-error b/tests/qemu-iotests/tests/file-io-error
23
new file mode 100755
24
index XXXXXXX..XXXXXXX
25
--- /dev/null
26
+++ b/tests/qemu-iotests/tests/file-io-error
27
@@ -XXX,XX +XXX,XX @@
28
+#!/usr/bin/env bash
29
+# group: rw
30
+#
31
+# Produce an I/O error in file-posix, and hope that it is not catastrophic.
32
+# Regression test for: https://bugzilla.redhat.com/show_bug.cgi?id=2234374
33
+#
34
+# Copyright (C) 2023 Red Hat, Inc.
35
+#
36
+# This program is free software; you can redistribute it and/or modify
37
+# it under the terms of the GNU General Public License as published by
38
+# the Free Software Foundation; either version 2 of the License, or
39
+# (at your option) any later version.
40
+#
41
+# This program is distributed in the hope that it will be useful,
42
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
43
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44
+# GNU General Public License for more details.
45
+#
46
+# You should have received a copy of the GNU General Public License
47
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
48
+#
49
+
50
+seq=$(basename "$0")
51
+echo "QA output created by $seq"
52
+
53
+status=1    # failure is the default!
54
+
55
+_cleanup()
56
+{
57
+ _cleanup_qemu
58
+ rm -f "$TEST_DIR/fuse-export"
59
+}
60
+trap "_cleanup; exit \$status" 0 1 2 3 15
61
+
62
+# get standard environment, filters and checks
63
+. ../common.rc
64
+. ../common.filter
65
+. ../common.qemu
66
+
67
+# Format-agnostic (we do not use any), but we do test the file protocol
68
+_supported_proto file
69
+_require_drivers blkdebug null-co
70
+
71
+if [ "$IMGOPTSSYNTAX" = "true" ]; then
72
+ # We need `$QEMU_IO -f file` to work; IMGOPTSSYNTAX uses --image-opts,
73
+ # breaking -f.
74
+ _unsupported_fmt $IMGFMT
75
+fi
76
+
77
+# This is a regression test of a bug in which file-posix would access zone
78
+# information in case of an I/O error even when there is no zone information,
79
+# resulting in a division by zero.
80
+# To reproduce the problem, we need to trigger an I/O error inside of
81
+# file-posix, which can be done (rootless) by providing a FUSE export that
82
+# presents only errors when accessed.
83
+
84
+_launch_qemu
85
+_send_qemu_cmd $QEMU_HANDLE \
86
+ "{'execute': 'qmp_capabilities'}" \
87
+ 'return'
88
+
89
+_send_qemu_cmd $QEMU_HANDLE \
90
+ "{'execute': 'blockdev-add',
91
+ 'arguments': {
92
+ 'driver': 'blkdebug',
93
+ 'node-name': 'node0',
94
+ 'inject-error': [{'event': 'none'}],
95
+ 'image': {
96
+ 'driver': 'null-co'
97
+ }
98
+ }}" \
99
+ 'return'
100
+
101
+# FUSE mountpoint must exist and be a regular file
102
+touch "$TEST_DIR/fuse-export"
103
+
104
+# The grep -v to filter fusermount's (benign) error when /etc/fuse.conf does
105
+# not contain user_allow_other and the subsequent check for missing FUSE support
106
+# have both been taken from iotest 308.
107
+output=$(_send_qemu_cmd $QEMU_HANDLE \
108
+ "{'execute': 'block-export-add',
109
+ 'arguments': {
110
+ 'id': 'exp0',
111
+ 'type': 'fuse',
112
+ 'node-name': 'node0',
113
+ 'mountpoint': '$TEST_DIR/fuse-export',
114
+ 'writable': true
115
+ }}" \
116
+ 'return' \
117
+ | grep -v 'option allow_other only allowed if')
118
+
119
+if echo "$output" | grep -q "Parameter 'type' does not accept value 'fuse'"; then
120
+ _notrun 'No FUSE support'
121
+fi
122
+echo "$output"
123
+
124
+echo
125
+# This should fail, but gracefully, i.e. just print an I/O error, not crash.
126
+$QEMU_IO -f file -c 'write 0 64M' "$TEST_DIR/fuse-export" | _filter_qemu_io
127
+echo
128
+
129
+_send_qemu_cmd $QEMU_HANDLE \
130
+ "{'execute': 'block-export-del',
131
+ 'arguments': {'id': 'exp0'}}" \
132
+ 'return'
133
+
134
+_send_qemu_cmd $QEMU_HANDLE \
135
+ '' \
136
+ 'BLOCK_EXPORT_DELETED'
137
+
138
+_send_qemu_cmd $QEMU_HANDLE \
139
+ "{'execute': 'blockdev-del',
140
+ 'arguments': {'node-name': 'node0'}}" \
141
+ 'return'
142
+
143
+# success, all done
144
+echo "*** done"
145
+rm -f $seq.full
146
+status=0
147
diff --git a/tests/qemu-iotests/tests/file-io-error.out b/tests/qemu-iotests/tests/file-io-error.out
148
new file mode 100644
149
index XXXXXXX..XXXXXXX
150
--- /dev/null
151
+++ b/tests/qemu-iotests/tests/file-io-error.out
152
@@ -XXX,XX +XXX,XX @@
153
+QA output created by file-io-error
154
+{'execute': 'qmp_capabilities'}
155
+{"return": {}}
156
+{'execute': 'blockdev-add',
157
+ 'arguments': {
158
+ 'driver': 'blkdebug',
159
+ 'node-name': 'node0',
160
+ 'inject-error': [{'event': 'none'}],
161
+ 'image': {
162
+ 'driver': 'null-co'
163
+ }
164
+ }}
165
+{"return": {}}
166
+{'execute': 'block-export-add',
167
+ 'arguments': {
168
+ 'id': 'exp0',
169
+ 'type': 'fuse',
170
+ 'node-name': 'node0',
171
+ 'mountpoint': 'TEST_DIR/fuse-export',
172
+ 'writable': true
173
+ }}
174
+{"return": {}}
175
+
176
+write failed: Input/output error
177
+
178
+{'execute': 'block-export-del',
179
+ 'arguments': {'id': 'exp0'}}
180
+{"return": {}}
181
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_EXPORT_DELETED", "data": {"id": "exp0"}}
182
+{'execute': 'blockdev-del',
183
+ 'arguments': {'node-name': 'node0'}}
184
+{"return": {}}
185
+*** done
186
--
187
2.41.0
diff view generated by jsdifflib