The following changes since commit 8048082f7a11040a366942a2de8abb4c3d0020c9:

  Merge remote-tracking branch 'remotes/stefanberger/tags/pull-tpm-2017-11-15-1' into staging (2017-11-16 11:34:24 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 341e0b5658681f46680024cdbfc998717d85cc35:

  throttle-groups: forget timer and schedule next TGM on detach (2017-11-16 14:12:57 +0000)

----------------------------------------------------------------

----------------------------------------------------------------

Stefan Hajnoczi (1):
  throttle-groups: forget timer and schedule next TGM on detach

 block/throttle-groups.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

--
2.13.6

The following changes since commit e4ae62b802cec437f877f2cadc4ef059cc0eca76:

  Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging (2018-03-09 17:28:16 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 7376eda7c2e0451e819e81bd05fabc56a9deb946:

  block: make BDRV_POLL_WHILE() re-entrancy safe (2018-03-12 11:07:37 +0000)

----------------------------------------------------------------

----------------------------------------------------------------

Stefan Hajnoczi (1):
  block: make BDRV_POLL_WHILE() re-entrancy safe

 include/block/aio-wait.h | 61 ++++++++++++++++++++++++------------------------
 util/aio-wait.c          |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)

--
2.14.3
tg->any_timer_armed[] must be cleared when detaching pending timers from
the AioContext.  Failure to do so leads to hung I/O because it looks
like there are still timers pending when in fact they have been removed.

Other ThrottleGroupMembers might have requests pending too so it's
necessary to schedule the next TGM so it can set a timer.

This patch fixes hung I/O when QEMU is launched with drives that are in
the same throttling group:

  (guest)$ dd if=/dev/zero of=/dev/vdb oflag=direct bs=512 &
  (guest)$ dd if=/dev/zero of=/dev/vdc oflag=direct bs=512 &
  (qemu) stop
  (qemu) cont
  ...I/O is stuck...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20171116112150.27607-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/throttle-groups.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index XXXXXXX..XXXXXXX 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -XXX,XX +XXX,XX @@ void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
 
 void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
 {
+    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
     ThrottleTimers *tt = &tgm->throttle_timers;
+    int i;
 
     /* Requests must have been drained */
     assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
     assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
     assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
 
+    /* Kick off next ThrottleGroupMember, if necessary */
+    qemu_mutex_lock(&tg->lock);
+    for (i = 0; i < 2; i++) {
+        if (timer_pending(tt->timers[i])) {
+            tg->any_timer_armed[i] = false;
+            schedule_next_request(tgm, i);
+        }
+    }
+    qemu_mutex_unlock(&tg->lock);
+
     throttle_timers_detach_aio_context(tt);
     tgm->aio_context = NULL;
 }
--
2.13.6
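As a rough illustration of the invariant the patch above restores, here is a
minimal, self-contained C sketch (the Group/Member types and the
try_arm_timer()/detach() helpers are hypothetical stand-ins, not the QEMU
throttling API): the group-wide "any timer armed" flag must be cleared when
the member that owns the armed timer detaches, otherwise no other member
ever arms a timer again and its queued requests never resume.

#include <stdbool.h>
#include <stdio.h>

/* Group-wide bookkeeping: at most one member keeps a throttle timer armed. */
typedef struct {
    bool any_timer_armed;
} Group;

typedef struct {
    Group *group;
    bool timer_pending;   /* this member currently owns the armed timer */
} Member;

/* A member only arms a timer if no other member already has one armed. */
static void try_arm_timer(Member *m)
{
    if (!m->group->any_timer_armed) {
        m->group->any_timer_armed = true;
        m->timer_pending = true;
    }
}

/* Detaching deletes the member's pending timer; the fix is to also clear
 * the group flag so another member can take over. */
static void detach(Member *m, bool clear_group_flag)
{
    if (m->timer_pending) {
        m->timer_pending = false;
        if (clear_group_flag) {
            m->group->any_timer_armed = false;
        }
    }
}

int main(void)
{
    Group g = { false };
    Member a = { &g, false }, b = { &g, false };

    try_arm_timer(&a);
    detach(&a, false);    /* buggy: the group still believes a timer is armed */
    try_arm_timer(&b);
    printf("buggy detach: b armed a timer? %s\n",
           b.timer_pending ? "yes" : "no (I/O would hang)");

    g.any_timer_armed = false;   /* reset the scenario */
    try_arm_timer(&a);
    detach(&a, true);     /* fixed: clear the flag on detach */
    try_arm_timer(&b);
    printf("fixed detach: b armed a timer? %s\n",
           b.timer_pending ? "yes" : "no");
    return 0;
}

The real patch additionally calls schedule_next_request() under tg->lock so
that another ThrottleGroupMember with pending requests gets to set its own
timer right away instead of waiting for new I/O to arrive.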
Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->wakeup) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180307124619.6218-1-stefanha@redhat.com
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/block/aio-wait.h | 61 ++++++++++++++++++++++++------------------------
 util/aio-wait.c          |  2 +-
 2 files changed, 31 insertions(+), 32 deletions(-)

diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -XXX,XX +XXX,XX @@
  * }
  */
 typedef struct {
-    /* Is the main loop waiting for a kick?  Accessed with atomic ops. */
-    bool need_kick;
+    /* Number of waiting AIO_WAIT_WHILE() callers.  Accessed with atomic ops. */
+    unsigned num_waiters;
 } AioWait;
 
 /**
@@ -XXX,XX +XXX,XX @@ typedef struct {
  * wait on conditions between two IOThreads since that could lead to deadlock,
  * go via the main loop instead.
  */
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({                  \
-    bool waited_ = false;                                   \
-    bool busy_ = true;                                      \
-    AioWait *wait_ = (wait);                                \
-    AioContext *ctx_ = (ctx);                               \
-    if (in_aio_context_home_thread(ctx_)) {                 \
-        while ((cond) || busy_) {                           \
-            busy_ = aio_poll(ctx_, (cond));                 \
-            waited_ |= !!(cond) | busy_;                    \
-        }                                                   \
-    } else {                                                \
-        assert(qemu_get_current_aio_context() ==            \
-               qemu_get_aio_context());                     \
-        assert(!wait_->need_kick);                          \
-        /* Set wait_->need_kick before evaluating cond. */  \
-        atomic_mb_set(&wait_->need_kick, true);             \
-        while (busy_) {                                     \
-            if ((cond)) {                                   \
-                waited_ = busy_ = true;                     \
-                aio_context_release(ctx_);                  \
-                aio_poll(qemu_get_aio_context(), true);     \
-                aio_context_acquire(ctx_);                  \
-            } else {                                        \
-                busy_ = aio_poll(ctx_, false);              \
-                waited_ |= busy_;                           \
-            }                                               \
-        }                                                   \
-        atomic_set(&wait_->need_kick, false);               \
-    }                                                       \
+#define AIO_WAIT_WHILE(wait, ctx, cond) ({                         \
+    bool waited_ = false;                                          \
+    bool busy_ = true;                                             \
+    AioWait *wait_ = (wait);                                       \
+    AioContext *ctx_ = (ctx);                                      \
+    if (in_aio_context_home_thread(ctx_)) {                        \
+        while ((cond) || busy_) {                                  \
+            busy_ = aio_poll(ctx_, (cond));                        \
+            waited_ |= !!(cond) | busy_;                           \
+        }                                                          \
+    } else {                                                       \
+        assert(qemu_get_current_aio_context() ==                   \
+               qemu_get_aio_context());                            \
+        /* Increment wait_->num_waiters before evaluating cond. */ \
+        atomic_inc(&wait_->num_waiters);                           \
+        while (busy_) {                                            \
+            if ((cond)) {                                          \
+                waited_ = busy_ = true;                            \
+                aio_context_release(ctx_);                         \
+                aio_poll(qemu_get_aio_context(), true);            \
+                aio_context_acquire(ctx_);                         \
+            } else {                                               \
+                busy_ = aio_poll(ctx_, false);                     \
+                waited_ |= busy_;                                  \
+            }                                                      \
+        }                                                          \
+        atomic_dec(&wait_->num_waiters);                           \
+    }                                                              \
     waited_; })
 
 /**
diff --git a/util/aio-wait.c b/util/aio-wait.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -XXX,XX +XXX,XX @@ static void dummy_bh_cb(void *opaque)
 void aio_wait_kick(AioWait *wait)
 {
     /* The barrier (or an atomic op) is in the caller.  */
-    if (atomic_read(&wait->need_kick)) {
+    if (atomic_read(&wait->num_waiters)) {
         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
     }
 }
--
2.14.3
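To see why a counter nests where a single "is someone waiting" flag cannot,
here is a small self-contained C11 model (Wait, wait_while() and wait_kick()
are hypothetical stand-ins for AioWait, AIO_WAIT_WHILE() and aio_wait_kick(),
and the polling is faked): every waiter, nested or not, increments
num_waiters on entry and decrements it on exit, so the kick side only needs
to check whether the count is non-zero.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for AioWait: counts the active WAIT_WHILE-style callers. */
typedef struct {
    atomic_uint num_waiters;
} Wait;

static Wait wait_state;

/* Stand-in for aio_wait_kick(): only wake someone if a waiter exists. */
static void wait_kick(Wait *w)
{
    if (atomic_load(&w->num_waiters) > 0) {
        printf("kick: %u active waiter(s)\n", atomic_load(&w->num_waiters));
    }
}

/* Stand-in for AIO_WAIT_WHILE(): bump the counter, "poll" until done.
 * With the old bool flag a nested call would have tripped an assertion;
 * with a counter it simply raises the count from 1 to 2 and back. */
static void wait_while(Wait *w, int *remaining, int nesting)
{
    atomic_fetch_add(&w->num_waiters, 1);
    while (*remaining > 0) {
        if (nesting > 0) {
            wait_while(w, remaining, nesting - 1);  /* nested wait */
        } else {
            (*remaining)--;      /* pretend one poll iteration made progress */
            wait_kick(w);
        }
    }
    atomic_fetch_sub(&w->num_waiters, 1);
}

int main(void)
{
    int remaining = 3;

    wait_while(&wait_state, &remaining, 1);  /* outer wait plus one nested level */
    assert(atomic_load(&wait_state.num_waiters) == 0);
    printf("all waits finished, num_waiters is back to 0\n");
    return 0;
}

The outer call makes progress exactly as the commit message describes: once
the nested wait returns, the outer loop re-evaluates its condition and finds
the remaining work already done.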