The following changes since commit b0fbe46ad82982b289a44ee2495b59b0bad8a842:

  Update version for v2.11.0-rc0 release (2017-11-07 16:05:28 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to ef6dada8b44e1e7c4bec5c1115903af9af415b50:

  util/async: use atomic_mb_set in qemu_bh_cancel (2017-11-08 19:09:15 +0000)

----------------------------------------------------------------
Pull request

v2:
 * v1 emails 2/3 and 3/3 weren't sent due to an email failure
 * Included Sergio's updated wording in the commit description

----------------------------------------------------------------

Sergio Lopez (1):
  util/async: use atomic_mb_set in qemu_bh_cancel

Stefan Hajnoczi (1):
  tests-aio-multithread: fix /aio/multi/schedule race condition

 tests/test-aio-multithread.c | 5 ++---
 util/async.c                 | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

--
2.13.6

test_multi_co_schedule_entry() set to_schedule[id] in the final loop
iteration before terminating the coroutine. There is a race condition
where the main thread attempts to enter the terminating or terminated
coroutine when signalling coroutines to stop:

  atomic_mb_set(&now_stopping, true);
  for (i = 0; i < NUM_CONTEXTS; i++) {
      ctx_run(i, finish_cb, NULL);   <--- enters dead coroutine!
      to_schedule[i] = NULL;
  }

Make sure only to set to_schedule[id] if this coroutine really needs to
be scheduled!

Reported-by: "R.Nageswara Sastry" <nasastry@in.ibm.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20171106190233.1175-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/test-aio-multithread.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -XXX,XX +XXX,XX @@ static void finish_cb(void *opaque)
 static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
 {
     g_assert(to_schedule[id] == NULL);
-    atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
 
     while (!atomic_mb_read(&now_stopping)) {
         int n;
 
         n = g_test_rand_int_range(0, NUM_CONTEXTS);
         schedule_next(n);
+
+        atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
         qemu_coroutine_yield();
-
         g_assert(to_schedule[id] == NULL);
-        atomic_mb_set(&to_schedule[id], qemu_coroutine_self());
     }
 }

--
2.13.6
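
[Editor's note: the ordering rule behind this fix generalizes beyond the
QEMU coroutine test. The following is a minimal standalone C11 sketch of
the pattern, not QEMU code; the names Worker, self_slot, stopping and
worker_step are illustrative only. A worker publishes itself as
resumable only at the instant it actually suspends, so the controller
can never pick up a pointer to a worker that has already left its loop.]

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Worker {
        const char *name;
    } Worker;

    static Worker *_Atomic self_slot;   /* where the controller looks */
    static atomic_int stopping;

    static void worker_step(Worker *w)
    {
        if (!atomic_load(&stopping)) {
            /* ... one unit of work would run here ... */

            /* Publish only right before suspending. Publishing earlier
             * (before the loop, as the old test did) lets the controller
             * try to resume a worker that has already finished. */
            atomic_store(&self_slot, w);
            /* a real coroutine would call qemu_coroutine_yield() here */
        }
        /* Once stopping is observed, the slot is never republished, so
         * the controller cannot see a dangling pointer. */
    }

    int main(void)
    {
        Worker w = { "w0" };

        worker_step(&w);                 /* publishes w, then "yields" */
        Worker *r = atomic_exchange(&self_slot, NULL);
        printf("resumable: %s\n", r ? r->name : "(none)");

        atomic_store(&stopping, 1);
        worker_step(&w);                 /* sees stopping, never republishes */
        r = atomic_exchange(&self_slot, NULL);
        printf("resumable: %s\n", r ? r->name : "(none)");
        return 0;
    }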

From: Sergio Lopez <slp@redhat.com>

Commit b7a745d added a qemu_bh_cancel call to the completion function
as an optimization to prevent it from unnecessarily rescheduling itself.

This completion function is scheduled from worker_thread, after setting
the state of a ThreadPoolElement to THREAD_DONE.

This was considered to be safe, as the completion function restarts the
loop just after the call to qemu_bh_cancel. But, as this loop lacks a HW
memory barrier, the read of req->state may actually happen _before_ the
call, seeing it still as THREAD_QUEUED, and ending the completion
function without having processed a pending TPE linked at pool->head:

             worker thread             |            I/O thread
------------------------------------------------------------------------
                                       | speculatively read req->state
    req->state = THREAD_DONE;          |
    qemu_bh_schedule(p->completion_bh) |
      bh->scheduled = 1;               |
                                       | qemu_bh_cancel(p->completion_bh)
                                       |   bh->scheduled = 0;
                                       |   if (req->state == THREAD_DONE)
                                       |     // sees THREAD_QUEUED

The source of the misunderstanding was that qemu_bh_cancel is now being
used by the _consumer_ rather than the producer, and therefore now needs
to have acquire semantics just like e.g. aio_bh_poll.

In some situations, if there are no other independent requests in the
same aio context that could eventually trigger the scheduling of the
completion function, the omitted TPE and all operations pending on it
will get stuck forever.

[Added Sergio's updated wording about the HW memory barrier.
--Stefan]

Signed-off-by: Sergio Lopez <slp@redhat.com>
Message-id: 20171108063447.2842-1-slp@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/async.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ void qemu_bh_schedule(QEMUBH *bh)
  */
 void qemu_bh_cancel(QEMUBH *bh)
 {
-    bh->scheduled = 0;
+    atomic_mb_set(&bh->scheduled, 0);
 }
 
 /* This func is async.The bottom half will do the delete action at the finial

--
2.13.6
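
[Editor's note: the consumer-side barrier pattern above can be shown
outside QEMU. The following is a minimal standalone C11 sketch, not
QEMU code; the names job_state, completion_scheduled, worker_finish and
completion are illustrative only. A seq_cst store plays the role that
atomic_mb_set() plays in the patch: clearing the "scheduled" flag with a
full barrier before re-reading shared state, so a stale
THREAD_QUEUED-style value cannot be observed.]

    #include <stdatomic.h>
    #include <stdio.h>

    enum { JOB_QUEUED, JOB_DONE };

    static atomic_int job_state = JOB_QUEUED;  /* set by the worker thread */
    static atomic_int completion_scheduled;    /* the bh->scheduled analog */

    /* Producer (worker thread): publish the result, then schedule a wakeup. */
    static void worker_finish(void)
    {
        atomic_store(&job_state, JOB_DONE);
        atomic_store(&completion_scheduled, 1);
    }

    /* Consumer (I/O thread): cancel the pending wakeup, then re-check state. */
    static void completion(void)
    {
        /* The seq_cst store stands in for atomic_mb_set(): the load of
         * job_state below cannot be hoisted above it. With the plain
         * non-atomic store the code used before the patch, the CPU may
         * read job_state speculatively first and still see JOB_QUEUED. */
        atomic_store(&completion_scheduled, 0);

        if (atomic_load(&job_state) == JOB_DONE) {
            puts("job processed");
        }
    }

    int main(void)
    {
        worker_finish();
        completion();
        return 0;
    }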