The following changes since commit 560009f2c8b57b7cdd31a5693ea86ab369382f49:

  Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging (2019-10-07 15:40:53 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 4d804b5305ffb4d5fa414c38d4f1bdfb987c8d0b:

  iotests/262: Switch source/dest VM launch order (2019-10-08 14:28:25 +0100)

----------------------------------------------------------------
Pull request

This pull request also contains the two commits from the previous pull request
that was dropped due to a mingw compilation error. The compilation should now
be fixed.

----------------------------------------------------------------

Max Reitz (2):
      block: Skip COR for inactive nodes
      iotests/262: Switch source/dest VM launch order

Sergio Lopez (1):
      virtio-blk: schedule virtio_notify_config to run on main context

Vladimir Sementsov-Ogievskiy (1):
      util/ioc.c: try to reassure Coverity about qemu_iovec_init_extended

 block/io.c                 | 41 +++++++++++++++++++++++++-------------
 hw/block/virtio-blk.c      | 16 ++++++++++++++-
 util/iov.c                 |  5 +++--
 tests/qemu-iotests/262     | 12 ++++++------
 tests/qemu-iotests/262.out |  6 +++---
 5 files changed, 54 insertions(+), 26 deletions(-)

--
2.21.0
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Make it more obvious that filling the qiov corresponds to the qiov
allocation, which in turn corresponds to the total_niov calculation
based on mid_niov (not mid_len). Still add an assertion to show that
there should be no difference.

[Added mingw "error: 'mid_iov' may be used uninitialized in this
function" compiler error fix suggested by Vladimir.
--Stefan]

Reported-by: Coverity (CID 1405302)
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-id: 20190910090310.14032-1-vsementsov@virtuozzo.com
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20190910090310.14032-1-vsementsov@virtuozzo.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

fixup! util/ioc.c: try to reassure Coverity about qemu_iovec_init_extended
---
 util/iov.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/util/iov.c b/util/iov.c
index XXXXXXX..XXXXXXX 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -XXX,XX +XXX,XX @@ void qemu_iovec_init_extended(
 {
     size_t mid_head, mid_tail;
     int total_niov, mid_niov = 0;
-    struct iovec *p, *mid_iov;
+    struct iovec *p, *mid_iov = NULL;

     if (mid_len) {
         mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len,
@@ -XXX,XX +XXX,XX @@ void qemu_iovec_init_extended(
         p++;
     }

-    if (mid_len) {
+    assert(!mid_niov == !mid_len);
+    if (mid_niov) {
         memcpy(p, mid_iov, mid_niov * sizeof(*p));
         p[0].iov_base = (uint8_t *)p[0].iov_base + mid_head;
         p[0].iov_len -= mid_head;
--
2.21.0
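The assertion added above encodes an invariant rather than a behaviour change: mid_niov and mid_len must be zero, or non-zero, together, so branching on the element count is equivalent to branching on the byte length, and the NULL initialisation only exists to quiet the mingw warning. The following is a minimal standalone sketch of those two idioms; every name in it is invented for illustration and none of it is QEMU code.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented example: copy 'src' into 'dst' only when there is something
     * to copy.  'n_items' and 'n_bytes' describe the same data, so they must
     * be zero (or non-zero) together -- the same relation the patch asserts
     * for mid_niov and mid_len. */
    static void copy_slice(char *dst, const char *src, int n_items,
                           size_t n_bytes)
    {
        const char *p = NULL;           /* quiets "may be used uninitialized" */

        if (n_bytes) {
            p = src;                    /* only set on the non-empty path */
        }

        assert(!n_items == !n_bytes);   /* zero iff zero */
        if (n_items) {                  /* branch on the count, like mid_niov */
            memcpy(dst, p, n_bytes);
        }
    }

    int main(void)
    {
        char out[8] = "";

        copy_slice(out, "abc", 3, 3);   /* copies */
        copy_slice(out, "abc", 0, 0);   /* no-op, the assertion still holds */
        printf("%s\n", out);
        return 0;
    }

With the NULL initialisation the compiler cannot warn about 'p', even though the assertion already guarantees it is set whenever it is read.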
From: Sergio Lopez <slp@redhat.com>

virtio_notify_config() needs to acquire the global mutex, which isn't
allowed from an iothread, and may lead to a deadlock like this:

- main thread
   * Has acquired: qemu_global_mutex.
   * Is trying to acquire: iothread AioContext lock via
     AIO_WAIT_WHILE (after aio_poll).

- iothread
   * Has acquired: AioContext lock.
   * Is trying to acquire: qemu_global_mutex (via
     virtio_notify_config->prepare_mmio_access).

If virtio_blk_resize() is called from an iothread, schedule
virtio_notify_config() to be run in the main context BH.

[Removed unnecessary newline as suggested by Kevin Wolf
<kwolf@redhat.com>.
--Stefan]

Signed-off-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-id: 20190916112411.21636-1-slp@redhat.com
Message-Id: <20190916112411.21636-1-slp@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/block/virtio-blk.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/iov.h"
 #include "qemu/module.h"
 #include "qemu/error-report.h"
+#include "qemu/main-loop.h"
 #include "trace.h"
 #include "hw/block/block.h"
 #include "hw/qdev-properties.h"
@@ -XXX,XX +XXX,XX @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
     return 0;
 }

+static void virtio_resize_cb(void *opaque)
+{
+    VirtIODevice *vdev = opaque;
+
+    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
+    virtio_notify_config(vdev);
+}
+
 static void virtio_blk_resize(void *opaque)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

-    virtio_notify_config(vdev);
+    /*
+     * virtio_notify_config() needs to acquire the global mutex,
+     * so it can't be called from an iothread. Instead, schedule
+     * it to be run in the main context BH.
+     */
+    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
 }

 static const BlockDevOps virtio_block_ops = {
--
2.21.0
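The deadlock described above is a classic lock-order inversion: each thread holds one lock and waits for the other. The fix hands the mutex-taking work to the thread that already owns that mutex, which is what aio_bh_schedule_oneshot() does in the patch. Below is a self-contained pthread sketch of that hand-off pattern; it is not QEMU code, and schedule_on_main(), notify_config() and worker_resize() are invented stand-ins for aio_bh_schedule_oneshot(), virtio_notify_config() and virtio_blk_resize().

    #include <pthread.h>
    #include <stdio.h>

    typedef void (*bh_fn)(void *opaque);

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static bh_fn pending_fn;
    static void *pending_opaque;

    /* Stand-in for aio_bh_schedule_oneshot(): remember one callback to run
     * later in the main thread instead of running it here. */
    static void schedule_on_main(bh_fn fn, void *opaque)
    {
        pthread_mutex_lock(&queue_lock);
        pending_fn = fn;
        pending_opaque = opaque;
        pthread_mutex_unlock(&queue_lock);
    }

    /* Stand-in for virtio_notify_config(): needs the "global" lock. */
    static void notify_config(void *opaque)
    {
        pthread_mutex_lock(&global_lock);
        printf("config change for %s notified from the main thread\n",
               (const char *)opaque);
        pthread_mutex_unlock(&global_lock);
    }

    /* Worker-thread resize handler.  In the real deadlock the worker already
     * holds its own (AioContext) lock here, so grabbing global_lock directly
     * could deadlock against a main thread that holds global_lock and waits
     * for the worker; deferring avoids the inversion. */
    static void *worker_resize(void *opaque)
    {
        schedule_on_main(notify_config, opaque);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker_resize, "disk0");
        pthread_join(&t, NULL);

        /* One "main loop" iteration: run the deferred callback in the thread
         * that owns global_lock, so no lock-order inversion is possible. */
        if (pending_fn) {
            pending_fn(pending_opaque);
        }
        return 0;
    }

Build with -lpthread; the point is only the shape of the hand-off, not the single-slot queue, which the real AioContext bottom-half machinery replaces.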
From: Max Reitz <mreitz@redhat.com>

We must not write data to inactive nodes, and a COR is certainly
something we can simply not do without upsetting anyone. So skip COR
operations on inactive nodes.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20191001174827.11081-2-mreitz@redhat.com
Message-Id: <20191001174827.11081-2-mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/io.c | 41 +++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 14 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                     BDRV_REQUEST_MAX_BYTES);
     unsigned int progress = 0;
+    bool skip_write;

     if (!drv) {
         return -ENOMEDIUM;
     }

+    /*
+     * Do not write anything when the BDS is inactive. That is not
+     * allowed, and it would not help.
+     */
+    skip_write = (bs->open_flags & BDRV_O_INACTIVE);
+
     /* FIXME We cannot require callers to have write permissions when all they
      * are doing is a read request. If we did things right, write permissions
      * would be obtained anyway, but internally by the copy-on-read code. As
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
     while (cluster_bytes) {
         int64_t pnum;

-        ret = bdrv_is_allocated(bs, cluster_offset,
-                                MIN(cluster_bytes, max_transfer), &pnum);
-        if (ret < 0) {
-            /* Safe to treat errors in querying allocation as if
-             * unallocated; we'll probably fail again soon on the
-             * read, but at least that will set a decent errno.
-             */
+        if (skip_write) {
+            ret = 1; /* "already allocated", so nothing will be copied */
             pnum = MIN(cluster_bytes, max_transfer);
-        }
+        } else {
+            ret = bdrv_is_allocated(bs, cluster_offset,
+                                    MIN(cluster_bytes, max_transfer), &pnum);
+            if (ret < 0) {
+                /*
+                 * Safe to treat errors in querying allocation as if
+                 * unallocated; we'll probably fail again soon on the
+                 * read, but at least that will set a decent errno.
+                 */
+                pnum = MIN(cluster_bytes, max_transfer);
+            }

-        /* Stop at EOF if the image ends in the middle of the cluster */
-        if (ret == 0 && pnum == 0) {
-            assert(progress >= bytes);
-            break;
-        }
+            /* Stop at EOF if the image ends in the middle of the cluster */
+            if (ret == 0 && pnum == 0) {
+                assert(progress >= bytes);
+                break;
+            }

-        assert(skip_bytes < pnum);
+            assert(skip_bytes < pnum);
+        }

         if (ret <= 0) {
             QEMUIOVector local_qiov;
--
2.21.0
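The restructured loop above keeps the read path intact and only suppresses the write: on an inactive node every chunk is reported as "already allocated" (ret = 1), so the copy branch is never taken. The following is a rough standalone model of that control flow, with invented names and sizes; it is not QEMU code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK 4

    /* Stand-in for bdrv_is_allocated(): pretend odd chunks are allocated. */
    static int is_allocated(int64_t offset, int64_t bytes, int64_t *pnum)
    {
        *pnum = bytes;
        return (offset / CHUNK) % 2;
    }

    static void copy_on_read(int64_t offset, int64_t bytes, bool inactive)
    {
        bool skip_write = inactive;   /* like bs->open_flags & BDRV_O_INACTIVE */

        while (bytes > 0) {
            int64_t pnum;
            int ret;

            if (skip_write) {
                ret = 1;              /* "already allocated": nothing copied */
                pnum = bytes < CHUNK ? bytes : CHUNK;
            } else {
                ret = is_allocated(offset, bytes < CHUNK ? bytes : CHUNK,
                                   &pnum);
            }

            if (ret <= 0) {
                printf("  would copy %2d bytes at offset %2d\n",
                       (int)pnum, (int)offset);
            }
            offset += pnum;
            bytes -= pnum;
        }
    }

    int main(void)
    {
        printf("active node:\n");
        copy_on_read(0, 16, false);
        printf("inactive node (everything skipped):\n");
        copy_on_read(0, 16, true);
        return 0;
    }

Running it shows copies only for the "active node" case; with the inactive flag set, the loop still walks the whole range but never reaches the copy branch.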
From: Max Reitz <mreitz@redhat.com>

Launching the destination VM before the source VM gives us a regression
test for HEAD^:

The guest device causes a read from the disk image through
guess_disk_lchs(). This will not work if the first sector (containing
the partition table) is yet unallocated, we use COR, and the node is
inactive.

By launching the source VM before the destination, however, the COR
filter on the source will allocate that area in the image shared between
both VMs, thus the problem will not become apparent.

Switching the launch order causes the sector to still be unallocated
when guess_disk_lchs() runs on the inactive node in the destination VM,
and thus we get our test case.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20191001174827.11081-3-mreitz@redhat.com
Message-Id: <20191001174827.11081-3-mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/qemu-iotests/262     | 12 ++++++------
 tests/qemu-iotests/262.out |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/qemu-iotests/262 b/tests/qemu-iotests/262
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/262
+++ b/tests/qemu-iotests/262
@@ -XXX,XX +XXX,XX @@ with iotests.FilePath('img') as img_path, \

     os.mkfifo(fifo)

-    iotests.log('Launching source VM...')
-    add_opts(vm_a)
-    vm_a.launch()
-
-    vm_a.enable_migration_events('A')
-
     iotests.log('Launching destination VM...')
     add_opts(vm_b)
     vm_b.add_incoming("exec: cat '%s'" % (fifo))
@@ -XXX,XX +XXX,XX @@ with iotests.FilePath('img') as img_path, \

     vm_b.enable_migration_events('B')

+    iotests.log('Launching source VM...')
+    add_opts(vm_a)
+    vm_a.launch()
+
+    vm_a.enable_migration_events('A')
+
     iotests.log('Starting migration to B...')
     iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo)))
     with iotests.Timeout(3, 'Migration does not complete'):
diff --git a/tests/qemu-iotests/262.out b/tests/qemu-iotests/262.out
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/262.out
+++ b/tests/qemu-iotests/262.out
@@ -XXX,XX +XXX,XX @@
-Launching source VM...
-Enabling migration QMP events on A...
-{"return": {}}
 Launching destination VM...
 Enabling migration QMP events on B...
 {"return": {}}
+Launching source VM...
+Enabling migration QMP events on A...
+{"return": {}}
 Starting migration to B...
 {"return": {}}
 {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
--
2.21.0
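The ordering argument above boils down to: whichever VM touches the shared image first decides whether sector 0 is still unallocated when the inactive destination node reads it. The toy model below illustrates only that point; all names are invented and it has no relation to the real iotest harness or QEMU code.

    #include <stdbool.h>
    #include <stdio.h>

    static bool sector0_allocated;   /* state of the shared disk image */

    /* Boot-time read of the partition table, as guess_disk_lchs() would do. */
    static void boot_read(const char *vm, bool inactive)
    {
        if (!sector0_allocated) {
            if (inactive) {
                printf("%s: inactive node reads unallocated sector 0 "
                       "-> COR must be skipped (the case under test)\n", vm);
            } else {
                sector0_allocated = true;   /* COR writes it into the image */
                printf("%s: COR allocates sector 0 in the shared image\n", vm);
            }
        } else {
            printf("%s: sector 0 already allocated, nothing interesting\n", vm);
        }
    }

    int main(void)
    {
        printf("old order (source first):\n");
        sector0_allocated = false;
        boot_read("source A", false);
        boot_read("dest B (inactive)", true);

        printf("new order (destination first):\n");
        sector0_allocated = false;
        boot_read("dest B (inactive)", true);
        boot_read("source A", false);
        return 0;
    }

Only the second ordering ever reaches the "inactive node reads unallocated sector 0" case, which is the situation the previous patch makes safe.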