The following changes since commit bec9c64ef7be8063f1192608b83877bc5c9ea217:

  Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging (2018-02-13 18:24:08 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to d2f668b74907cbd96d9df0774971768ed06de2f0:

  misc: fix spelling (2018-02-15 09:39:49 +0000)

----------------------------------------------------------------
Pull request

v2:
 * Dropped Fam's git-publish series because there is still ongoing discussion

----------------------------------------------------------------

Marc-André Lureau (1):
  misc: fix spelling

Stefan Hajnoczi (1):
  vl: pause vcpus before stopping iothreads

Wolfgang Bumiller (1):
  ratelimit: don't align wait time with slices

 include/qemu/ratelimit.h   | 11 +++++------
 util/qemu-coroutine-lock.c |  2 +-
 vl.c                       | 12 ++++++++++--
 3 files changed, 16 insertions(+), 9 deletions(-)

--
2.14.3
Commit dce8921b2baaf95974af8176406881872067adfa ("iothread: Stop threads
before main() quits") introduced iothread_stop_all() to avoid the
following virtio-scsi assertion failure:

  assert(blk_get_aio_context(d->conf.blk) == s->ctx);

Back then the assertion failed because when bdrv_close_all() made
d->conf.blk NULL, blk_get_aio_context() returned the global AioContext
instead of s->ctx.

The same assertion can still fail today when vcpus submit new I/O
requests after iothread_stop_all() has moved the BDS to the global
AioContext.
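
The race, roughly (this timeline is an illustration; only the assertion
itself is quoted from the code):

  vcpu thread                        main thread
  -----------                        -----------
                                     iothread_stop_all()
                                       -> BDS moved to global AioContext
  guest submits a new I/O request
  virtio-scsi handles the request:
    assert(blk_get_aio_context(d->conf.blk) == s->ctx);  <- now fails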

This patch hardens the iothread_stop_all() approach by pausing vcpus
before calling iothread_stop_all().

Note that the assertion failure is a race condition. It is not possible
to reproduce it reliably.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180201110708.8080-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 vl.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/vl.c b/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/vl.c
+++ b/vl.c
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)

     main_loop();
     replay_disable_events();
+
+    /* The ordering of the following is delicate. Stop vcpus to prevent new
+     * I/O requests being queued by the guest. Then stop IOThreads (this
+     * includes a drain operation and completes all request processing). At
+     * this point emulated devices are still associated with their IOThreads
+     * (if any) but no longer have any work to do. Only then can we close
+     * block devices safely because we know there is no more I/O coming.
+     */
+    pause_all_vcpus();
     iothread_stop_all();
-
-    pause_all_vcpus();
     bdrv_close_all();
+
     res_free();

     /* vhost-user must be cleaned up before chardevs. */
--
2.14.3

From: Wolfgang Bumiller <w.bumiller@proxmox.com>

It is possible for rate limited writes to keep overshooting a slice's
quota by a tiny amount, causing the slice-aligned waiting period to
effectively halve the rate.
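
For illustration, here is a standalone sketch of the rounding problem
(not part of the patch; the quota, overshoot, and slice length values
are hypothetical):

  #include <inttypes.h>
  #include <stdio.h>

  int main(void)
  {
      uint64_t slice_quota = 100;     /* units allowed per slice */
      uint64_t dispatched = 101;      /* units sent: overshot by 1 */
      uint64_t slice_ns = 100000000;  /* 100 ms per slice */

      /* Old code: round up to whole slices -> 2 slices (200 ms). */
      uint64_t old_slices = (dispatched + slice_quota - 1) / slice_quota;

      /* New code: wait in proportion to the excess -> 1.01 slices (101 ms). */
      double new_slices = (double)dispatched / slice_quota;

      printf("old: %" PRIu64 " ns, new: %" PRIu64 " ns\n",
             old_slices * slice_ns, (uint64_t)(new_slices * slice_ns));
      return 0;
  }

A writer that overshoots by a tiny amount in every slice therefore used
to wait two slices per slice's worth of data, i.e. it ran at about half
the configured rate.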

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Reviewed-by: Alberto Garcia <berto@igalia.com>
Message-id: 20180207071758.6818-1-w.bumiller@proxmox.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/ratelimit.h | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ -XXX,XX +XXX,XX @@ typedef struct {
 static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
 {
     int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-    uint64_t delay_slices;
+    double delay_slices;

     assert(limit->slice_quota && limit->slice_ns);

@@ -XXX,XX +XXX,XX @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
         return 0;
     }

-    /* Quota exceeded. Calculate the next time slice we may start
-     * sending data again. */
-    delay_slices = (limit->dispatched + limit->slice_quota - 1) /
-        limit->slice_quota;
+    /* Quota exceeded. Wait based on the excess amount and then start a new
+     * slice. */
+    delay_slices = (double)limit->dispatched / limit->slice_quota;
     limit->slice_end_time = limit->slice_start_time +
-        delay_slices * limit->slice_ns;
+        (uint64_t)(delay_slices * limit->slice_ns);
     return limit->slice_end_time - now;
 }

--
2.14.3

From: Marc-André Lureau <marcandre.lureau@redhat.com>

s/pupulate/populate

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180208162447.10851-1-marcandre.lureau@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/qemu-coroutine-lock.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -XXX,XX +XXX,XX @@ void qemu_co_queue_run_restart(Coroutine *co)
      * invalid memory. Therefore, use a temporary queue and do not touch
      * the "co" coroutine as soon as you enter another one.
      *
-     * In its turn resumed "co" can pupulate "co_queue_wakeup" queue with
+     * In its turn resumed "co" can populate "co_queue_wakeup" queue with
      * new coroutines to be woken up. The caller, who has resumed "co",
      * will be responsible for traversing the same queue, which may cause
      * a different wakeup order but not any missing wakeups.
--
2.14.3