1 | The following changes since commit 96662996eda78c48aadddd4e76d8615c7eb72d80: | 1 | The following changes since commit d922088eb4ba6bc31a99f17b32cf75e59dd306cd: |
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20210513a' into staging (2021-05-14 12:03:47 +0100) | 3 | Merge tag 'ui-pull-request' of https://gitlab.com/marcandre.lureau/qemu into staging (2025-02-03 13:42:02 -0500) |
4 | 4 | ||
5 | are available in the Git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | git://repo.or.cz/qemu/kevin.git tags/for-upstream | 7 | https://repo.or.cz/qemu/kevin.git tags/for-upstream |
8 | 8 | ||
9 | for you to fetch changes up to b773c9fb68ceff9a9692409d7afbc5d6865983c6: | 9 | for you to fetch changes up to fc4e394b2887e15d5f83994e4fc7b26c895c627a: |
10 | 10 | ||
11 | vhost-user-blk: Check that num-queues is supported by backend (2021-05-14 18:04:27 +0200) | 11 | block: remove unused BLOCK_OP_TYPE_DATAPLANE (2025-02-06 14:51:10 +0100) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Block layer patches | 14 | Block layer patches |
15 | 15 | ||
16 | - vhost-user-blk: Fix error handling during initialisation | 16 | - Managing inactive nodes (enables QSD migration with shared storage) |
17 | - Add test cases for the vhost-user-blk export | 17 | - Fix swapped values for BLOCK_IO_ERROR 'device' and 'qom-path' |
18 | - Fix leaked Transaction objects | 18 | - vpc: Read images exported from Azure correctly |
19 | - qcow2: Expose dirty bit in 'qemu-img info' | 19 | - scripts/qemu-gdb: Support coroutine dumps in coredumps |
20 | - Minor cleanups | ||
20 | 21 | ||
21 | ---------------------------------------------------------------- | 22 | ---------------------------------------------------------------- |
22 | Coiby Xu (1): | 23 | Fabiano Rosas (1): |
23 | test: new qTest case to test the vhost-user-blk-server | 24 | block: Fix leak in send_qmp_error_event |
24 | 25 | ||
25 | Kevin Wolf (8): | 26 | Kevin Wolf (16): |
26 | block: Fix Transaction leak in bdrv_root_attach_child() | 27 | block: Add 'active' field to BlockDeviceInfo |
27 | block: Fix Transaction leak in bdrv_reopen_multiple() | 28 | block: Allow inactivating already inactive nodes |
28 | vhost-user-blk: Make sure to set Error on realize failure | 29 | block: Inactivate external snapshot overlays when necessary |
29 | vhost-user-blk: Don't reconnect during initialisation | 30 | migration/block-active: Remove global active flag |
30 | vhost-user-blk: Improve error reporting in realize | 31 | block: Don't attach inactive child to active node |
31 | vhost-user-blk: Get more feature flags from vhost device | 32 | block: Fix crash on block_resize on inactive node |
32 | virtio: Fail if iommu_platform is requested, but unsupported | 33 | block: Add option to create inactive nodes |
33 | vhost-user-blk: Check that num-queues is supported by backend | 34 | block: Add blockdev-set-active QMP command |
35 | block: Support inactive nodes in blk_insert_bs() | ||
36 | block/export: Don't ignore image activation error in blk_exp_add() | ||
37 | block: Drain nodes before inactivating them | ||
38 | block/export: Add option to allow export of inactive nodes | ||
39 | nbd/server: Support inactive nodes | ||
40 | iotests: Add filter_qtest() | ||
41 | iotests: Add qsd-migrate case | ||
42 | iotests: Add (NBD-based) tests for inactive nodes | ||
34 | 43 | ||
35 | Michael Tokarev (1): | 44 | Peter Krempa (1): |
36 | qapi: spelling fix (addtional) | 45 | block-backend: Fix argument order when calling 'qapi_event_send_block_io_error()' |
37 | 46 | ||
38 | Stefan Hajnoczi (3): | 47 | Peter Xu (3): |
39 | block/export: improve vu_blk_sect_range_ok() | 48 | scripts/qemu-gdb: Always do full stack dump for python errors |
40 | tests/qtest: add multi-queue test case to vhost-user-blk-test | 49 | scripts/qemu-gdb: Simplify fs_base fetching for coroutines |
41 | vhost-user-blk-test: test discard/write zeroes invalid inputs | 50 | scripts/qemu-gdb: Support coroutine dumps in coredumps |
42 | 51 | ||
43 | Vladimir Sementsov-Ogievskiy (1): | 52 | Philippe Mathieu-Daudé (1): |
44 | qcow2: set bdi->is_dirty | 53 | block: Improve blk_get_attached_dev_id() docstring |
45 | 54 | ||
46 | qapi/qom.json | 4 +- | 55 | Stefan Hajnoczi (1): |
47 | include/hw/virtio/vhost.h | 2 + | 56 | block: remove unused BLOCK_OP_TYPE_DATAPLANE |
48 | tests/qtest/libqos/vhost-user-blk.h | 48 ++ | 57 | |
49 | block.c | 9 +- | 58 | Vitaly Kuznetsov (2): |
50 | block/export/vhost-user-blk-server.c | 9 +- | 59 | vpc: Split off vpc_ignore_current_size() helper |
51 | block/qcow2.c | 1 + | 60 | vpc: Read images exported from Azure correctly |
52 | hw/block/vhost-user-blk.c | 85 ++- | 61 | |
53 | hw/virtio/vhost-user.c | 5 + | 62 | qapi/block-core.json | 44 +++- |
54 | hw/virtio/virtio-bus.c | 5 + | 63 | qapi/block-export.json | 10 +- |
55 | tests/qtest/libqos/vhost-user-blk.c | 130 +++++ | 64 | include/block/block-common.h | 2 +- |
56 | tests/qtest/vhost-user-blk-test.c | 989 +++++++++++++++++++++++++++++++++++ | 65 | include/block/block-global-state.h | 6 + |
57 | MAINTAINERS | 2 + | 66 | include/block/export.h | 3 + |
58 | tests/qtest/libqos/meson.build | 1 + | 67 | include/system/block-backend-io.h | 7 + |
59 | tests/qtest/meson.build | 4 + | 68 | migration/migration.h | 3 - |
60 | 14 files changed, 1232 insertions(+), 62 deletions(-) | 69 | block.c | 64 +++++- |
61 | create mode 100644 tests/qtest/libqos/vhost-user-blk.h | 70 | block/block-backend.c | 32 ++- |
62 | create mode 100644 tests/qtest/libqos/vhost-user-blk.c | 71 | block/export/export.c | 29 ++- |
63 | create mode 100644 tests/qtest/vhost-user-blk-test.c | 72 | block/monitor/block-hmp-cmds.c | 5 +- |
73 | block/qapi.c | 1 + | ||
74 | block/replication.c | 1 - | ||
75 | block/vpc.c | 65 +++--- | ||
76 | blockdev.c | 48 ++++ | ||
77 | blockjob.c | 2 - | ||
78 | hw/block/virtio-blk.c | 9 - | ||
79 | hw/scsi/virtio-scsi.c | 3 - | ||
80 | migration/block-active.c | 46 ---- | ||
81 | migration/migration.c | 8 - | ||
82 | nbd/server.c | 17 ++ | ||
83 | scripts/qemu-gdb.py | 2 + | ||
84 | scripts/qemugdb/coroutine.py | 102 ++++++--- | ||
85 | tests/qemu-iotests/iotests.py | 8 + | ||
86 | tests/qemu-iotests/041 | 4 +- | ||
87 | tests/qemu-iotests/165 | 4 +- | ||
88 | tests/qemu-iotests/184.out | 2 + | ||
89 | tests/qemu-iotests/191.out | 16 ++ | ||
90 | tests/qemu-iotests/273.out | 5 + | ||
91 | tests/qemu-iotests/tests/copy-before-write | 3 +- | ||
92 | tests/qemu-iotests/tests/inactive-node-nbd | 303 +++++++++++++++++++++++++ | ||
93 | tests/qemu-iotests/tests/inactive-node-nbd.out | 239 +++++++++++++++++++ | ||
94 | tests/qemu-iotests/tests/migrate-bitmaps-test | 7 +- | ||
95 | tests/qemu-iotests/tests/qsd-migrate | 140 ++++++++++++ | ||
96 | tests/qemu-iotests/tests/qsd-migrate.out | 59 +++++ | ||
97 | 35 files changed, 1133 insertions(+), 166 deletions(-) | ||
98 | create mode 100755 tests/qemu-iotests/tests/inactive-node-nbd | ||
99 | create mode 100644 tests/qemu-iotests/tests/inactive-node-nbd.out | ||
100 | create mode 100755 tests/qemu-iotests/tests/qsd-migrate | ||
101 | create mode 100644 tests/qemu-iotests/tests/qsd-migrate.out | ||
64 | 102 | ||
1 | From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 1 | From: Vitaly Kuznetsov <vkuznets@redhat.com> |
2 | 2 | ||
3 | Set bdi->is_dirty, so that qemu-img info could show dirty flag. | 3 | In preparation for making changes to the logic deciding whether CHS or |
4 | 'current_size' needs to be used in determining the image size, split off |
5 | vpc_ignore_current_size() helper. | ||
4 | 6 | ||
5 | After this commit the following check will show '"dirty-flag": true': | 7 | No functional change intended. |
6 | 8 | ||
7 | ./build/qemu-img create -f qcow2 -o lazy_refcounts=on x 1M | 9 | Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> |
8 | ./build/qemu-io x | 10 | Message-ID: <20241212134504.1983757-2-vkuznets@redhat.com> |
9 | qemu-io> write 0 1M | 11 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> |
10 | 12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | |
11 | After "write" command success, kill the qemu-io process: | ||
12 | |||
13 | kill -9 <qemu-io pid> | ||
14 | |||
15 | ./build/qemu-img info --output=json x | ||
16 | |||
17 | This will show '"dirty-flag": true' among other things. (before this | ||
18 | commit it shows '"dirty-flag": false') | ||
19 | |||
20 | Note that qcow2's dirty bit is not a "dirty bit for the image". It |
21 | only protects the qcow2 lazy refcounts feature. So there are a lot of |
22 | conditions where a qcow2 session may not be closed correctly but the |
23 | bit is still 0. However, when the bit is set, the last session was |
24 | definitely not finished correctly and it's better to report it. |
25 | |||
26 | Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | ||
27 | Message-Id: <20210504160656.462836-1-vsementsov@virtuozzo.com> | ||
28 | Tested-by: Kirill Tkhai <ktkhai@virtuozzo.com> | ||
29 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
30 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
31 | --- | 14 | --- |
32 | block/qcow2.c | 1 + | 15 | block/vpc.c | 67 +++++++++++++++++++++++++++++------------------------ |
33 | 1 file changed, 1 insertion(+) | 16 | 1 file changed, 37 insertions(+), 30 deletions(-) |
34 | 17 | ||
35 | diff --git a/block/qcow2.c b/block/qcow2.c | 18 | diff --git a/block/vpc.c b/block/vpc.c |
36 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
37 | --- a/block/qcow2.c | 20 | --- a/block/vpc.c |
38 | +++ b/block/qcow2.c | 21 | +++ b/block/vpc.c |
39 | @@ -XXX,XX +XXX,XX @@ static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) | 22 | @@ -XXX,XX +XXX,XX @@ static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts, |
40 | BDRVQcow2State *s = bs->opaque; | 23 | } |
41 | bdi->cluster_size = s->cluster_size; | ||
42 | bdi->vm_state_offset = qcow2_vm_state_offset(s); | ||
43 | + bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY; | ||
44 | return 0; | ||
45 | } | 24 | } |
46 | 25 | ||
26 | +/* | ||
27 | + * Microsoft Virtual PC and Microsoft Hyper-V produce and read | ||
28 | + * VHD image sizes differently. VPC will rely on CHS geometry, | ||
29 | + * while Hyper-V and disk2vhd use the size specified in the footer. | ||
30 | + * | ||
31 | + * We use a couple of approaches to try and determine the correct method: | ||
32 | + * look at the Creator App field, and look for images that have CHS | ||
33 | + * geometry that is the maximum value. | ||
34 | + * | ||
35 | + * If the CHS geometry is the maximum CHS geometry, then we assume that | ||
36 | + * the size is the footer->current_size to avoid truncation. Otherwise, | ||
37 | + * we follow the table based on footer->creator_app: | ||
38 | + * | ||
39 | + * Known creator apps: | ||
40 | + * 'vpc ' : CHS Virtual PC (uses disk geometry) | ||
41 | + * 'qemu' : CHS QEMU (uses disk geometry) | ||
42 | + * 'qem2' : current_size QEMU (uses current_size) | ||
43 | + * 'win ' : current_size Hyper-V | ||
44 | + * 'd2v ' : current_size Disk2vhd | ||
45 | + * 'tap\0' : current_size XenServer | ||
46 | + * 'CTXS' : current_size XenConverter | ||
47 | + * | ||
48 | + * The user can override the table values via drive options, however | ||
49 | + * even with an override we will still use current_size for images | ||
50 | + * that have CHS geometry of the maximum size. | ||
51 | + */ | ||
52 | +static bool vpc_ignore_current_size(VHDFooter *footer) | ||
53 | +{ | ||
54 | + return !!strncmp(footer->creator_app, "win ", 4) && | ||
55 | + !!strncmp(footer->creator_app, "qem2", 4) && | ||
56 | + !!strncmp(footer->creator_app, "d2v ", 4) && | ||
57 | + !!strncmp(footer->creator_app, "CTXS", 4) && | ||
58 | + !!memcmp(footer->creator_app, "tap", 4); |
59 | +} | ||
60 | + | ||
61 | static int vpc_open(BlockDriverState *bs, QDict *options, int flags, | ||
62 | Error **errp) | ||
63 | { | ||
64 | @@ -XXX,XX +XXX,XX @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, | ||
65 | bs->total_sectors = (int64_t) | ||
66 | be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl; | ||
67 | |||
68 | - /* Microsoft Virtual PC and Microsoft Hyper-V produce and read | ||
69 | - * VHD image sizes differently. VPC will rely on CHS geometry, | ||
70 | - * while Hyper-V and disk2vhd use the size specified in the footer. | ||
71 | - * | ||
72 | - * We use a couple of approaches to try and determine the correct method: | ||
73 | - * look at the Creator App field, and look for images that have CHS | ||
74 | - * geometry that is the maximum value. | ||
75 | - * | ||
76 | - * If the CHS geometry is the maximum CHS geometry, then we assume that | ||
77 | - * the size is the footer->current_size to avoid truncation. Otherwise, | ||
78 | - * we follow the table based on footer->creator_app: | ||
79 | - * | ||
80 | - * Known creator apps: | ||
81 | - * 'vpc ' : CHS Virtual PC (uses disk geometry) | ||
82 | - * 'qemu' : CHS QEMU (uses disk geometry) | ||
83 | - * 'qem2' : current_size QEMU (uses current_size) | ||
84 | - * 'win ' : current_size Hyper-V | ||
85 | - * 'd2v ' : current_size Disk2vhd | ||
86 | - * 'tap\0' : current_size XenServer | ||
87 | - * 'CTXS' : current_size XenConverter | ||
88 | - * | ||
89 | - * The user can override the table values via drive options, however | ||
90 | - * even with an override we will still use current_size for images | ||
91 | - * that have CHS geometry of the maximum size. | ||
92 | - */ | ||
93 | - use_chs = (!!strncmp(footer->creator_app, "win ", 4) && | ||
94 | - !!strncmp(footer->creator_app, "qem2", 4) && | ||
95 | - !!strncmp(footer->creator_app, "d2v ", 4) && | ||
96 | - !!strncmp(footer->creator_app, "CTXS", 4) && | ||
97 | - !!memcmp(footer->creator_app, "tap", 4)) || s->force_use_chs; | ||
98 | + /* Use CHS or current_size to determine the image size. */ | ||
99 | + use_chs = vpc_ignore_current_size(footer) || s->force_use_chs; | ||
100 | |||
101 | if (!use_chs || bs->total_sectors == VHD_MAX_GEOMETRY || s->force_use_sz) { | ||
102 | bs->total_sectors = be64_to_cpu(footer->current_size) / | ||
47 | -- | 103 | -- |
48 | 2.30.2 | 104 | 2.48.1 |
49 | 105 | ||
New patch | |||
1 | From: Vitaly Kuznetsov <vkuznets@redhat.com> | ||
1 | 2 | ||
3 | It was found that 'qemu-nbd' is not able to work with some disk images | ||
4 | exported from Azure. Looking at the 512b footer (which contains VPC | ||
5 | metadata): | ||
6 | |||
7 | 00000000 63 6f 6e 65 63 74 69 78 00 00 00 02 00 01 00 00 |conectix........| | ||
8 | 00000010 ff ff ff ff ff ff ff ff 2e c7 9b 96 77 61 00 00 |............wa..| | ||
9 | 00000020 00 07 00 00 57 69 32 6b 00 00 00 01 40 00 00 00 |....Wi2k....@...| | ||
10 | 00000030 00 00 00 01 40 00 00 00 28 a2 10 3f 00 00 00 02 |....@...(..?....| | ||
11 | 00000040 ff ff e7 47 8c 54 df 94 bd 35 71 4c 94 5f e5 44 |...G.T...5qL._.D| | ||
12 | 00000050 44 53 92 1a 00 00 00 00 00 00 00 00 00 00 00 00 |DS..............| | ||
13 | 00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| | ||
14 | |||
15 | we can see that Azure uses a different 'Creator application' -- | ||
16 | 'wa\0\0' (offset 0x1c, likely reads as 'Windows Azure') and QEMU uses this | ||
17 | field to determine how it can get the image size. Apparently, Azure uses |
18 | the 'new' method, just like Hyper-V. |
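For reference, the creator app of an image can also be checked by hand by dumping the last 512 bytes. The snippet below is an illustrative, standalone sketch (not part of the patch); it assumes the usual fixed-size VHD footer at the end of the file, with creator_app at offset 0x1c as in the hexdump above.

    /* Illustrative sketch only (not QEMU code): print the 'conectix'
     * cookie and the creator_app field of a VHD footer so the table in
     * block/vpc.c can be checked against a real image. */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        unsigned char footer[512];
        FILE *f;

        if (argc != 2) {
            fprintf(stderr, "usage: %s image.vhd\n", argv[0]);
            return 1;
        }
        f = fopen(argv[1], "rb");
        if (!f || fseek(f, -512, SEEK_END) != 0 ||
            fread(footer, 1, 512, f) != 512) {
            perror("read footer");
            return 1;
        }
        printf("cookie:      %.8s\n", (const char *)footer);
        printf("creator_app: %.4s (%02x %02x %02x %02x)\n",
               (const char *)&footer[0x1c], footer[0x1c], footer[0x1d],
               footer[0x1e], footer[0x1f]);
        fclose(f);
        return 0;
    }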
19 | |||
20 | Overall, it seems that only VPC and old QEMUs need to be ignored as all new | ||
21 | creator apps seem to have reliable current_size. Invert the logic and make | ||
22 | the 'current_size' method the default to avoid adding every new creator app to |
23 | the list. | ||
24 | |||
25 | Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> | ||
26 | Message-ID: <20241212134504.1983757-3-vkuznets@redhat.com> | ||
27 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
28 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
29 | --- | ||
30 | block/vpc.c | 8 +++----- | ||
31 | 1 file changed, 3 insertions(+), 5 deletions(-) | ||
32 | |||
33 | diff --git a/block/vpc.c b/block/vpc.c | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/block/vpc.c | ||
36 | +++ b/block/vpc.c | ||
37 | @@ -XXX,XX +XXX,XX @@ static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts, | ||
38 | * 'd2v ' : current_size Disk2vhd | ||
39 | * 'tap\0' : current_size XenServer | ||
40 | * 'CTXS' : current_size XenConverter | ||
41 | + * 'wa\0\0': current_size Azure | ||
42 | * | ||
43 | * The user can override the table values via drive options, however | ||
44 | * even with an override we will still use current_size for images | ||
45 | @@ -XXX,XX +XXX,XX @@ static void vpc_parse_options(BlockDriverState *bs, QemuOpts *opts, | ||
46 | */ | ||
47 | static bool vpc_ignore_current_size(VHDFooter *footer) | ||
48 | { | ||
49 | - return !!strncmp(footer->creator_app, "win ", 4) && | ||
50 | - !!strncmp(footer->creator_app, "qem2", 4) && | ||
51 | - !!strncmp(footer->creator_app, "d2v ", 4) && | ||
52 | - !!strncmp(footer->creator_app, "CTXS", 4) && | ||
53 | - !!memcmp(footer->creator_app, "tap", 4); |
54 | + return !strncmp(footer->creator_app, "vpc ", 4) || | ||
55 | + !strncmp(footer->creator_app, "qemu", 4); | ||
56 | } | ||
57 | |||
58 | static int vpc_open(BlockDriverState *bs, QDict *options, int flags, | ||
59 | -- | ||
60 | 2.48.1 |
1 | From: Stefan Hajnoczi <stefanha@redhat.com> | 1 | From: Philippe Mathieu-Daudé <philmd@linaro.org> |
2 | 2 | ||
3 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 3 | Expose the method docstring in the header, and mention that the |
4 | Message-Id: <20210309094106.196911-4-stefanha@redhat.com> | 4 | returned value must be freed by the caller. |
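As a usage illustration only (not part of this patch): blk_get_attached_dev_id() is the real helper, while the caller below is made up. A caller would typically pair the call with g_autofree or an explicit g_free():

    /* Illustrative only: the documented ownership rule in practice. */
    static void report_attached_dev(BlockBackend *blk)
    {
        g_autofree char *dev_id = blk_get_attached_dev_id(blk);

        printf("attached device: %s\n", dev_id);
        /* dev_id is released automatically by g_autofree when it goes
         * out of scope; a plain g_free(dev_id) would work as well. */
    }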
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 5 | |
6 | Message-Id: <20210322092327.150720-3-stefanha@redhat.com> | 6 | Reported-by: Fabiano Rosas <farosas@suse.de> |
7 | Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
8 | Message-ID: <20241111170333.43833-2-philmd@linaro.org> | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
8 | --- | 10 | --- |
9 | tests/qtest/vhost-user-blk-test.c | 81 +++++++++++++++++++++++++++++-- | 11 | include/system/block-backend-io.h | 7 +++++++ |
10 | 1 file changed, 76 insertions(+), 5 deletions(-) | 12 | block/block-backend.c | 12 ++++++++---- |
13 | 2 files changed, 15 insertions(+), 4 deletions(-) | ||
11 | 14 | ||
12 | diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c | 15 | diff --git a/include/system/block-backend-io.h b/include/system/block-backend-io.h |
13 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tests/qtest/vhost-user-blk-test.c | 17 | --- a/include/system/block-backend-io.h |
15 | +++ b/tests/qtest/vhost-user-blk-test.c | 18 | +++ b/include/system/block-backend-io.h |
16 | @@ -XXX,XX +XXX,XX @@ static void pci_hotplug(void *obj, void *data, QGuestAllocator *t_alloc) | 19 | @@ -XXX,XX +XXX,XX @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow); |
17 | qpci_unplug_acpi_device_test(qts, "drv1", PCI_SLOT_HP); | 20 | void blk_set_disable_request_queuing(BlockBackend *blk, bool disable); |
21 | bool blk_iostatus_is_enabled(const BlockBackend *blk); | ||
22 | |||
23 | +/* | ||
24 | + * Return the qdev ID, or if no ID is assigned the QOM path, | ||
25 | + * of the block device attached to the BlockBackend. | ||
26 | + * | ||
27 | + * The caller is responsible for releasing the value returned | ||
28 | + * with g_free() after use. | ||
29 | + */ | ||
30 | char *blk_get_attached_dev_id(BlockBackend *blk); | ||
31 | |||
32 | BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, | ||
33 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/block/block-backend.c | ||
36 | +++ b/block/block-backend.c | ||
37 | @@ -XXX,XX +XXX,XX @@ DeviceState *blk_get_attached_dev(BlockBackend *blk) | ||
38 | return blk->dev; | ||
18 | } | 39 | } |
19 | 40 | ||
20 | +static void multiqueue(void *obj, void *data, QGuestAllocator *t_alloc) | 41 | +/* |
21 | +{ | 42 | + * The caller is responsible for releasing the value returned |
22 | + QVirtioPCIDevice *pdev1 = obj; | 43 | + * with g_free() after use. |
23 | + QVirtioDevice *dev1 = &pdev1->vdev; | 44 | + */ |
24 | + QVirtioPCIDevice *pdev8; | 45 | static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id) |
25 | + QVirtioDevice *dev8; | 46 | { |
26 | + QTestState *qts = pdev1->pdev->bus->qts; | 47 | DeviceState *dev = blk->dev; |
27 | + uint64_t features; | 48 | @@ -XXX,XX +XXX,XX @@ static char *blk_get_attached_dev_id_or_path(BlockBackend *blk, bool want_id) |
28 | + uint16_t num_queues; | 49 | return object_get_canonical_path(OBJECT(dev)) ?: g_strdup(""); |
29 | + | ||
30 | + /* | ||
31 | + * The primary device has 1 queue and VIRTIO_BLK_F_MQ is not enabled. The | ||
32 | + * VIRTIO specification allows VIRTIO_BLK_F_MQ to be enabled when there is | ||
33 | + * only 1 virtqueue, but --device vhost-user-blk-pci doesn't do this (which | ||
34 | + * is also spec-compliant). | ||
35 | + */ | ||
36 | + features = qvirtio_get_features(dev1); | ||
37 | + g_assert_cmpint(features & (1u << VIRTIO_BLK_F_MQ), ==, 0); | ||
38 | + features = features & ~(QVIRTIO_F_BAD_FEATURE | | ||
39 | + (1u << VIRTIO_RING_F_INDIRECT_DESC) | | ||
40 | + (1u << VIRTIO_F_NOTIFY_ON_EMPTY) | | ||
41 | + (1u << VIRTIO_BLK_F_SCSI)); | ||
42 | + qvirtio_set_features(dev1, features); | ||
43 | + | ||
44 | + /* Hotplug a secondary device with 8 queues */ | ||
45 | + qtest_qmp_device_add(qts, "vhost-user-blk-pci", "drv1", | ||
46 | + "{'addr': %s, 'chardev': 'char2', 'num-queues': 8}", | ||
47 | + stringify(PCI_SLOT_HP) ".0"); | ||
48 | + | ||
49 | + pdev8 = virtio_pci_new(pdev1->pdev->bus, | ||
50 | + &(QPCIAddress) { | ||
51 | + .devfn = QPCI_DEVFN(PCI_SLOT_HP, 0) | ||
52 | + }); | ||
53 | + g_assert_nonnull(pdev8); | ||
54 | + g_assert_cmpint(pdev8->vdev.device_type, ==, VIRTIO_ID_BLOCK); | ||
55 | + | ||
56 | + qos_object_start_hw(&pdev8->obj); | ||
57 | + | ||
58 | + dev8 = &pdev8->vdev; | ||
59 | + features = qvirtio_get_features(dev8); | ||
60 | + g_assert_cmpint(features & (1u << VIRTIO_BLK_F_MQ), | ||
61 | + ==, | ||
62 | + (1u << VIRTIO_BLK_F_MQ)); | ||
63 | + features = features & ~(QVIRTIO_F_BAD_FEATURE | | ||
64 | + (1u << VIRTIO_RING_F_INDIRECT_DESC) | | ||
65 | + (1u << VIRTIO_F_NOTIFY_ON_EMPTY) | | ||
66 | + (1u << VIRTIO_BLK_F_SCSI) | | ||
67 | + (1u << VIRTIO_BLK_F_MQ)); | ||
68 | + qvirtio_set_features(dev8, features); | ||
69 | + | ||
70 | + num_queues = qvirtio_config_readw(dev8, | ||
71 | + offsetof(struct virtio_blk_config, num_queues)); | ||
72 | + g_assert_cmpint(num_queues, ==, 8); | ||
73 | + | ||
74 | + qvirtio_pci_device_disable(pdev8); | ||
75 | + qos_object_destroy(&pdev8->obj); | ||
76 | + | ||
77 | + /* unplug secondary disk */ | ||
78 | + qpci_unplug_acpi_device_test(qts, "drv1", PCI_SLOT_HP); | ||
79 | +} | ||
80 | + | ||
81 | /* | ||
82 | * Check that setting the vring addr on a non-existent virtqueue does | ||
83 | * not crash. | ||
84 | @@ -XXX,XX +XXX,XX @@ static void quit_storage_daemon(void *data) | ||
85 | g_free(data); | ||
86 | } | 50 | } |
87 | 51 | ||
88 | -static void start_vhost_user_blk(GString *cmd_line, int vus_instances) | 52 | -/* |
89 | +static void start_vhost_user_blk(GString *cmd_line, int vus_instances, | 53 | - * Return the qdev ID, or if no ID is assigned the QOM path, of the block |
90 | + int num_queues) | 54 | - * device attached to the BlockBackend. |
55 | - */ | ||
56 | char *blk_get_attached_dev_id(BlockBackend *blk) | ||
91 | { | 57 | { |
92 | const char *vhost_user_blk_bin = qtest_qemu_storage_daemon_binary(); | 58 | return blk_get_attached_dev_id_or_path(blk, true); |
93 | int i; | 59 | } |
94 | @@ -XXX,XX +XXX,XX @@ static void start_vhost_user_blk(GString *cmd_line, int vus_instances) | 60 | |
95 | g_string_append_printf(storage_daemon_command, | 61 | +/* |
96 | "--blockdev driver=file,node-name=disk%d,filename=%s " | 62 | + * The caller is responsible for releasing the value returned |
97 | "--export type=vhost-user-blk,id=disk%d,addr.type=unix,addr.path=%s," | 63 | + * with g_free() after use. |
98 | - "node-name=disk%i,writable=on ", | 64 | + */ |
99 | - i, img_path, i, sock_path, i); | 65 | static char *blk_get_attached_dev_path(BlockBackend *blk) |
100 | + "node-name=disk%i,writable=on,num-queues=%d ", | ||
101 | + i, img_path, i, sock_path, i, num_queues); | ||
102 | |||
103 | g_string_append_printf(cmd_line, "-chardev socket,id=char%d,path=%s ", | ||
104 | i + 1, sock_path); | ||
105 | @@ -XXX,XX +XXX,XX @@ static void start_vhost_user_blk(GString *cmd_line, int vus_instances) | ||
106 | |||
107 | static void *vhost_user_blk_test_setup(GString *cmd_line, void *arg) | ||
108 | { | 66 | { |
109 | - start_vhost_user_blk(cmd_line, 1); | 67 | return blk_get_attached_dev_id_or_path(blk, false); |
110 | + start_vhost_user_blk(cmd_line, 1, 1); | ||
111 | return arg; | ||
112 | } | ||
113 | |||
114 | @@ -XXX,XX +XXX,XX @@ static void *vhost_user_blk_test_setup(GString *cmd_line, void *arg) | ||
115 | static void *vhost_user_blk_hotplug_test_setup(GString *cmd_line, void *arg) | ||
116 | { | ||
117 | /* "-chardev socket,id=char2" is used for pci_hotplug*/ | ||
118 | - start_vhost_user_blk(cmd_line, 2); | ||
119 | + start_vhost_user_blk(cmd_line, 2, 1); | ||
120 | + return arg; | ||
121 | +} | ||
122 | + | ||
123 | +static void *vhost_user_blk_multiqueue_test_setup(GString *cmd_line, void *arg) | ||
124 | +{ | ||
125 | + start_vhost_user_blk(cmd_line, 2, 8); | ||
126 | return arg; | ||
127 | } | ||
128 | |||
129 | @@ -XXX,XX +XXX,XX @@ static void register_vhost_user_blk_test(void) | ||
130 | |||
131 | opts.before = vhost_user_blk_hotplug_test_setup; | ||
132 | qos_add_test("hotplug", "vhost-user-blk-pci", pci_hotplug, &opts); | ||
133 | + | ||
134 | + opts.before = vhost_user_blk_multiqueue_test_setup; | ||
135 | + qos_add_test("multiqueue", "vhost-user-blk-pci", multiqueue, &opts); | ||
136 | } | ||
137 | |||
138 | libqos_init(register_vhost_user_blk_test); | ||
139 | -- | 68 | -- |
140 | 2.30.2 | 69 | 2.48.1 |
141 | 70 | ||
1 | From: Michael Tokarev <mjt@tls.msk.ru> | 1 | From: Fabiano Rosas <farosas@suse.de> |
2 | 2 | ||
3 | Fixes: 3d0d3c30ae3a259bff176f85a3efa2d0816695af | 3 | ASAN detected a leak when running the ahci-test |
4 | Signed-off-by: Michael Tokarev <mjt@tls.msk.ru> | 4 | /ahci/io/dma/lba28/retry: |
5 | Message-Id: <20210508093315.393274-1-mjt@msgid.tls.msk.ru> | 5 | |
6 | Direct leak of 35 byte(s) in 1 object(s) allocated from: | ||
7 | #0 in malloc | ||
8 | #1 in __vasprintf_internal | ||
9 | #2 in vasprintf | ||
10 | #3 in g_vasprintf | ||
11 | #4 in g_strdup_vprintf | ||
12 | #5 in g_strdup_printf | ||
13 | #6 in object_get_canonical_path ../qom/object.c:2096:19 | ||
14 | #7 in blk_get_attached_dev_id_or_path ../block/block-backend.c:1033:12 | ||
15 | #8 in blk_get_attached_dev_path ../block/block-backend.c:1047:12 | ||
16 | #9 in send_qmp_error_event ../block/block-backend.c:2140:36 | ||
17 | #10 in blk_error_action ../block/block-backend.c:2172:9 | ||
18 | #11 in ide_handle_rw_error ../hw/ide/core.c:875:5 | ||
19 | #12 in ide_dma_cb ../hw/ide/core.c:894:13 | ||
20 | #13 in dma_complete ../system/dma-helpers.c:107:9 | ||
21 | #14 in dma_blk_cb ../system/dma-helpers.c:129:9 | ||
22 | #15 in blk_aio_complete ../block/block-backend.c:1552:9 | ||
23 | #16 in blk_aio_write_entry ../block/block-backend.c:1619:5 | ||
24 | #17 in coroutine_trampoline ../util/coroutine-ucontext.c:175:9 | ||
25 | |||
26 | Plug the leak by freeing the device path string. | ||
27 | |||
28 | Signed-off-by: Fabiano Rosas <farosas@suse.de> | ||
29 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
30 | Message-ID: <20241111145214.8261-1-farosas@suse.de> | ||
31 | [PMD: Use g_autofree] | ||
32 | Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> | ||
33 | Message-ID: <20241111170333.43833-3-philmd@linaro.org> | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 34 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
7 | --- | 35 | --- |
8 | qapi/qom.json | 4 ++-- | 36 | block/block-backend.c | 4 ++-- |
9 | 1 file changed, 2 insertions(+), 2 deletions(-) | 37 | 1 file changed, 2 insertions(+), 2 deletions(-) |
10 | 38 | ||
11 | diff --git a/qapi/qom.json b/qapi/qom.json | 39 | diff --git a/block/block-backend.c b/block/block-backend.c |
12 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/qapi/qom.json | 41 | --- a/block/block-backend.c |
14 | +++ b/qapi/qom.json | 42 | +++ b/block/block-backend.c |
15 | @@ -XXX,XX +XXX,XX @@ | 43 | @@ -XXX,XX +XXX,XX @@ static void send_qmp_error_event(BlockBackend *blk, |
16 | # | 44 | { |
17 | # @max_queue_size: the maximum number of packets to keep in the queue for | 45 | IoOperationType optype; |
18 | # comparing with incoming packets from @secondary_in. If the | 46 | BlockDriverState *bs = blk_bs(blk); |
19 | -# queue is full and addtional packets are received, the | 47 | + g_autofree char *path = blk_get_attached_dev_path(blk); |
20 | -# addtional packets are dropped. (default: 1024) | 48 | |
21 | +# queue is full and additional packets are received, the | 49 | optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; |
22 | +# additional packets are dropped. (default: 1024) | 50 | - qapi_event_send_block_io_error(blk_name(blk), |
23 | # | 51 | - blk_get_attached_dev_path(blk), |
24 | # @vnet_hdr_support: if true, vnet header support is enabled (default: false) | 52 | + qapi_event_send_block_io_error(blk_name(blk), path, |
25 | # | 53 | bs ? bdrv_get_node_name(bs) : NULL, optype, |
54 | action, blk_iostatus_is_enabled(blk), | ||
55 | error == ENOSPC, strerror(error)); | ||
26 | -- | 56 | -- |
27 | 2.30.2 | 57 | 2.48.1 |
28 | 58 | ||
1 | VIRTIO_F_RING_PACKED and VIRTIO_F_IOMMU_PLATFORM need to be supported by | 1 | From: Peter Xu <peterx@redhat.com> |
2 | the vhost device, otherwise advertising it to the guest doesn't result | ||
3 | in a working configuration. They are currently not supported by the | ||
4 | vhost-user-blk export in QEMU. | ||
5 | 2 | ||
6 | Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1935020 | 3 | It's easier for either debugging plugin errors, or issue reports. |
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 4 | |
8 | Acked-by: Raphael Norwitz <raphael.norwitz@nutanix.com> | 5 | Signed-off-by: Peter Xu <peterx@redhat.com> |
9 | Message-Id: <20210429171316.162022-5-kwolf@redhat.com> | 6 | Message-ID: <20241212204801.1420528-2-peterx@redhat.com> |
10 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 7 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
12 | --- | 9 | --- |
13 | hw/block/vhost-user-blk.c | 2 ++ | 10 | scripts/qemu-gdb.py | 2 ++ |
14 | 1 file changed, 2 insertions(+) | 11 | 1 file changed, 2 insertions(+) |
15 | 12 | ||
16 | diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c | 13 | diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py |
17 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/hw/block/vhost-user-blk.c | 15 | --- a/scripts/qemu-gdb.py |
19 | +++ b/hw/block/vhost-user-blk.c | 16 | +++ b/scripts/qemu-gdb.py |
20 | @@ -XXX,XX +XXX,XX @@ static const int user_feature_bits[] = { | 17 | @@ -XXX,XX +XXX,XX @@ def __init__(self): |
21 | VIRTIO_RING_F_INDIRECT_DESC, | 18 | # Default to silently passing through SIGUSR1, because QEMU sends it |
22 | VIRTIO_RING_F_EVENT_IDX, | 19 | # to itself a lot. |
23 | VIRTIO_F_NOTIFY_ON_EMPTY, | 20 | gdb.execute('handle SIGUSR1 pass noprint nostop') |
24 | + VIRTIO_F_RING_PACKED, | 21 | +# Always print full stack for python errors, easier to debug and report issues |
25 | + VIRTIO_F_IOMMU_PLATFORM, | 22 | +gdb.execute('set python print-stack full') |
26 | VHOST_INVALID_FEATURE_BIT | ||
27 | }; | ||
28 | |||
29 | -- | 23 | -- |
30 | 2.30.2 | 24 | 2.48.1 |
31 | |||
New patch | |||
1 | From: Peter Xu <peterx@redhat.com> | ||
1 | 2 | ||
3 | There is a bunch of code trying to fetch fs_base in different ways. IIUC |
4 | the simplest way instead is "$fs_base". It also has the benefit that it'll |
5 | work for both live gdb sessions and coredumps. |
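The equivalence can also be checked outside gdb; the following standalone C sketch (illustrative only, assumes Linux x86_64 with glibc) prints the %fs base next to pthread_self():

    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/prctl.h>

    int main(void)
    {
        unsigned long fs_base = 0;

        /* Same ARCH_GET_FS trick the old helper in coroutine.py used */
        syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base);
        printf("fs_base:        0x%lx\n", fs_base);
        printf("pthread_self(): 0x%lx\n", (unsigned long)pthread_self());
        return 0;
    }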
6 | |||
7 | Signed-off-by: Peter Xu <peterx@redhat.com> | ||
8 | Message-ID: <20241212204801.1420528-3-peterx@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | --- | ||
12 | scripts/qemugdb/coroutine.py | 23 ++--------------------- | ||
13 | 1 file changed, 2 insertions(+), 21 deletions(-) | ||
14 | |||
15 | diff --git a/scripts/qemugdb/coroutine.py b/scripts/qemugdb/coroutine.py | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/scripts/qemugdb/coroutine.py | ||
18 | +++ b/scripts/qemugdb/coroutine.py | ||
19 | @@ -XXX,XX +XXX,XX @@ | ||
20 | |||
21 | VOID_PTR = gdb.lookup_type('void').pointer() | ||
22 | |||
23 | -def get_fs_base(): | ||
24 | - '''Fetch %fs base value using arch_prctl(ARCH_GET_FS). This is | ||
25 | - pthread_self().''' | ||
26 | - # %rsp - 120 is scratch space according to the SystemV ABI | ||
27 | - old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)') | ||
28 | - gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True) | ||
29 | - fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)') | ||
30 | - gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True) | ||
31 | - return fs_base | ||
32 | - | ||
33 | def pthread_self(): | ||
34 | - '''Fetch pthread_self() from the glibc start_thread function.''' | ||
35 | - f = gdb.newest_frame() | ||
36 | - while f.name() != 'start_thread': | ||
37 | - f = f.older() | ||
38 | - if f is None: | ||
39 | - return get_fs_base() | ||
40 | - | ||
41 | - try: | ||
42 | - return f.read_var("arg") | ||
43 | - except ValueError: | ||
44 | - return get_fs_base() | ||
45 | + '''Fetch the base address of TLS.''' | ||
46 | + return gdb.parse_and_eval("$fs_base") | ||
47 | |||
48 | def get_glibc_pointer_guard(): | ||
49 | '''Fetch glibc pointer guard value''' | ||
50 | -- | ||
51 | 2.48.1 |
New patch | |||
1 | From: Peter Xu <peterx@redhat.com> | ||
1 | 2 | ||
3 | Dumping coroutines doesn't yet work with coredumps. Let's make it work. |
4 | |||
5 | We still keep most of the old code because it can be either more |
6 | flexible or prettier, and only add the fallbacks when it stops working. |
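The fallback is essentially a manual frame-pointer walk. A rough C analogue of the idea (illustrative only, assumes x86_64 and a build with frame pointers; it is not the patch itself, which does this from gdb's Python API):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    static void dump_frames(void)
    {
        /* Frame layout with -fno-omit-frame-pointer:
         * [rbp] = saved rbp of the caller, [rbp + 8] = return address */
        uint64_t *rbp = __builtin_frame_address(0);
        int i = 0;

        while (rbp) {
            uint64_t rip = rbp[1];
            if (!rip) {
                break;
            }
            printf("#%d rip=0x%" PRIx64 "\n", i++, rip);
            rbp = (uint64_t *)rbp[0]; /* follow the saved rbp chain */
        }
    }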
7 | |||
8 | Currently the raw unwind is pretty ugly, but it works, like this: | ||
9 | |||
10 | (gdb) qemu bt | ||
11 | #0 process_incoming_migration_co (opaque=0x0) at ../migration/migration.c:788 | ||
12 | #1 0x000055ae6c0dc4d9 in coroutine_trampoline (i0=-1711718576, i1=21934) at ../util/coroutine-ucontext.c:175 | ||
13 | #2 0x00007f9f59d72f40 in ??? () at /lib64/libc.so.6 | ||
14 | #3 0x00007ffd549214a0 in ??? () | ||
15 | #4 0x0000000000000000 in ??? () | ||
16 | Coroutine at 0x7f9f4c57c748: | ||
17 | #0 0x55ae6c0dc9a8 in qemu_coroutine_switch<+120> () at ../util/coroutine-ucontext.c:321 | ||
18 | #1 0x55ae6c0da2f8 in qemu_aio_coroutine_enter<+356> () at ../util/qemu-coroutine.c:293 | ||
19 | #2 0x55ae6c0da3f1 in qemu_coroutine_enter<+34> () at ../util/qemu-coroutine.c:316 | ||
20 | #3 0x55ae6baf775e in migration_incoming_process<+43> () at ../migration/migration.c:876 | ||
21 | #4 0x55ae6baf7ab4 in migration_ioc_process_incoming<+490> () at ../migration/migration.c:1008 | ||
22 | #5 0x55ae6bae9ae7 in migration_channel_process_incoming<+145> () at ../migration/channel.c:45 | ||
23 | #6 0x55ae6bb18e35 in socket_accept_incoming_migration<+118> () at ../migration/socket.c:132 | ||
24 | #7 0x55ae6be939ef in qio_net_listener_channel_func<+131> () at ../io/net-listener.c:54 | ||
25 | #8 0x55ae6be8ce1a in qio_channel_fd_source_dispatch<+78> () at ../io/channel-watch.c:84 | ||
26 | #9 0x7f9f5b26728c in g_main_context_dispatch_unlocked.lto_priv<+315> () | ||
27 | #10 0x7f9f5b267555 in g_main_context_dispatch<+36> () | ||
28 | #11 0x55ae6c0d91a7 in glib_pollfds_poll<+90> () at ../util/main-loop.c:287 | ||
29 | #12 0x55ae6c0d9235 in os_host_main_loop_wait<+128> () at ../util/main-loop.c:310 | ||
30 | #13 0x55ae6c0d9364 in main_loop_wait<+203> () at ../util/main-loop.c:589 | ||
31 | #14 0x55ae6bac212a in qemu_main_loop<+41> () at ../system/runstate.c:835 | ||
32 | #15 0x55ae6bfdf522 in qemu_default_main<+19> () at ../system/main.c:37 | ||
33 | #16 0x55ae6bfdf55f in main<+40> () at ../system/main.c:48 | ||
34 | #17 0x7f9f59d42248 in __libc_start_call_main<+119> () | ||
35 | #18 0x7f9f59d4230b in __libc_start_main_impl<+138> () | ||
36 | |||
37 | Signed-off-by: Peter Xu <peterx@redhat.com> | ||
38 | Message-ID: <20241212204801.1420528-4-peterx@redhat.com> | ||
39 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
40 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
41 | --- | ||
42 | scripts/qemugdb/coroutine.py | 79 +++++++++++++++++++++++++++++++++--- | ||
43 | 1 file changed, 73 insertions(+), 6 deletions(-) | ||
44 | |||
45 | diff --git a/scripts/qemugdb/coroutine.py b/scripts/qemugdb/coroutine.py | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/scripts/qemugdb/coroutine.py | ||
48 | +++ b/scripts/qemugdb/coroutine.py | ||
49 | @@ -XXX,XX +XXX,XX @@ def get_jmpbuf_regs(jmpbuf): | ||
50 | 'r15': jmpbuf[JB_R15], | ||
51 | 'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) } | ||
52 | |||
53 | -def bt_jmpbuf(jmpbuf): | ||
54 | - '''Backtrace a jmpbuf''' | ||
55 | - regs = get_jmpbuf_regs(jmpbuf) | ||
56 | +def symbol_lookup(addr): | ||
57 | + # Example: "__clone3 + 44 in section .text of /lib64/libc.so.6" | ||
58 | + result = gdb.execute(f"info symbol {hex(addr)}", to_string=True).strip() | ||
59 | + try: | ||
60 | + if "+" in result: | ||
61 | + (func, result) = result.split(" + ") | ||
62 | + (offset, result) = result.split(" in ") | ||
63 | + else: | ||
64 | + offset = "0" | ||
65 | + (func, result) = result.split(" in ") | ||
66 | + func_str = f"{func}<+{offset}> ()" | ||
67 | + except: | ||
68 | + return f"??? ({result})" | ||
69 | + | ||
70 | + # Example: Line 321 of "../util/coroutine-ucontext.c" starts at address | ||
71 | + # 0x55cf3894d993 <qemu_coroutine_switch+99> and ends at 0x55cf3894d9ab | ||
72 | + # <qemu_coroutine_switch+123>. | ||
73 | + result = gdb.execute(f"info line *{hex(addr)}", to_string=True).strip() | ||
74 | + if not result.startswith("Line "): | ||
75 | + return func_str | ||
76 | + result = result[5:] | ||
77 | + | ||
78 | + try: | ||
79 | + result = result.split(" starts ")[0] | ||
80 | + (line, path) = result.split(" of ") | ||
81 | + path = path.replace("\"", "") | ||
82 | + except: | ||
83 | + return func_str | ||
84 | + | ||
85 | + return f"{func_str} at {path}:{line}" | ||
86 | + | ||
87 | +def dump_backtrace(regs): | ||
88 | + ''' | ||
89 | + Backtrace dump with raw registers, mimic GDB command 'bt'. | ||
90 | + ''' | ||
91 | + # Here only rbp and rip that matter.. | ||
92 | + rbp = regs['rbp'] | ||
93 | + rip = regs['rip'] | ||
94 | + i = 0 | ||
95 | + | ||
96 | + while rbp: | ||
97 | + # For all return addresses on stack, we want to look up symbol/line | ||
98 | + # on the CALL command, because the return address is the next | ||
99 | + # instruction instead of the CALL. Here -1 would work for any | ||
100 | + # sized CALL instruction. | ||
101 | + print(f"#{i} {hex(rip)} in {symbol_lookup(rip if i == 0 else rip-1)}") | ||
102 | + rip = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)} + 8)") | ||
103 | + rbp = gdb.parse_and_eval(f"*(uint64_t *)(uint64_t)({hex(rbp)})") | ||
104 | + i += 1 | ||
105 | + | ||
106 | +def dump_backtrace_live(regs): | ||
107 | + ''' | ||
108 | + Backtrace dump with gdb's 'bt' command, only usable in a live session. | ||
109 | + ''' | ||
110 | old = dict() | ||
111 | |||
112 | # remember current stack frame and select the topmost | ||
113 | @@ -XXX,XX +XXX,XX @@ def bt_jmpbuf(jmpbuf): | ||
114 | |||
115 | selected_frame.select() | ||
116 | |||
117 | +def bt_jmpbuf(jmpbuf): | ||
118 | + '''Backtrace a jmpbuf''' | ||
119 | + regs = get_jmpbuf_regs(jmpbuf) | ||
120 | + try: | ||
121 | + # This reuses gdb's "bt" command, which can be slightly prettier | ||
122 | + # but only works with live sessions. | ||
123 | + dump_backtrace_live(regs) | ||
124 | + except: | ||
125 | + # If above doesn't work, fallback to poor man's unwind | ||
126 | + dump_backtrace(regs) | ||
127 | + | ||
128 | def co_cast(co): | ||
129 | return co.cast(gdb.lookup_type('CoroutineUContext').pointer()) | ||
130 | |||
131 | @@ -XXX,XX +XXX,XX @@ def invoke(self, arg, from_tty): | ||
132 | |||
133 | gdb.execute("bt") | ||
134 | |||
135 | - if gdb.parse_and_eval("qemu_in_coroutine()") == False: | ||
136 | - return | ||
137 | + try: | ||
138 | + # This only works with a live session | ||
139 | + co_ptr = gdb.parse_and_eval("qemu_coroutine_self()") | ||
140 | + except: | ||
141 | + # Fallback to use hard-coded ucontext vars if it's coredump | ||
142 | + co_ptr = gdb.parse_and_eval("co_tls_current") | ||
143 | |||
144 | - co_ptr = gdb.parse_and_eval("qemu_coroutine_self()") | ||
145 | + if co_ptr == False: | ||
146 | + return | ||
147 | |||
148 | while True: | ||
149 | co = co_cast(co_ptr) | ||
150 | -- | ||
151 | 2.48.1 |
1 | Creating a device with a number of queues that isn't supported by the | 1 | From: Peter Krempa <pkrempa@redhat.com> |
2 | backend is pointless, the device won't work properly and the error | ||
3 | messages are rather confusing. | ||
4 | 2 | ||
5 | Just fail to create the device if num-queues is higher than what the | 3 | Commit 7452162adec25c10 introduced 'qom-path' argument to BLOCK_IO_ERROR |
6 | backend supports. | 4 | event but when the event is instantiated in 'send_qmp_error_event()' the |
5 | arguments for 'device' and 'qom_path' in | ||
6 | qapi_event_send_block_io_error() were reversed : | ||
7 | 7 | ||
8 | Since the relationship between num-queues and the number of virtqueues | 8 | Generated code for sending event: |
9 | depends on the specific device, this is an additional value that needs | ||
10 | to be initialised by the device. For convenience, allow leaving it 0 if | ||
11 | the check should be skipped. This makes sense for vhost-user-net where | ||
12 | separate vhost devices are used for the queues and custom initialisation | ||
13 | code is needed to perform the check. | ||
14 | 9 | ||
15 | Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1935031 | 10 | void qapi_event_send_block_io_error(const char *qom_path, |
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | const char *device, |
17 | Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com> | 12 | const char *node_name, |
18 | Message-Id: <20210429171316.162022-7-kwolf@redhat.com> | 13 | IoOperationType operation, |
19 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 14 | [...] |
15 | |||
16 | Call inside send_qmp_error_event(): | ||
17 | |||
18 | qapi_event_send_block_io_error(blk_name(blk), | ||
19 | blk_get_attached_dev_path(blk), | ||
20 | bs ? bdrv_get_node_name(bs) : NULL, optype, | ||
21 | [...] | ||
22 | |||
23 | This results into reporting the QOM path as the device alias and vice | ||
24 | versa which in turn breaks libvirt, which expects the device alias being | ||
25 | either a valid alias or empty (which would make libvirt do the lookup by | ||
26 | node-name instead). | ||
27 | |||
28 | Cc: qemu-stable@nongnu.org | ||
29 | Fixes: 7452162adec2 ("qapi: add qom-path to BLOCK_IO_ERROR event") | ||
30 | Signed-off-by: Peter Krempa <pkrempa@redhat.com> | ||
31 | Message-ID: <09728d784888b38d7a8f09ee5e9e9c542c875e1e.1737973614.git.pkrempa@redhat.com> | ||
32 | Reviewed-by: Daniel P. Berrangé <berrange@redhat.com> | ||
33 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
20 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 34 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
21 | --- | 35 | --- |
22 | include/hw/virtio/vhost.h | 2 ++ | 36 | block/block-backend.c | 2 +- |
23 | hw/block/vhost-user-blk.c | 1 + | 37 | 1 file changed, 1 insertion(+), 1 deletion(-) |
24 | hw/virtio/vhost-user.c | 5 +++++ | ||
25 | 3 files changed, 8 insertions(+) | ||
26 | 38 | ||
27 | diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h | 39 | diff --git a/block/block-backend.c b/block/block-backend.c |
28 | index XXXXXXX..XXXXXXX 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
29 | --- a/include/hw/virtio/vhost.h | 41 | --- a/block/block-backend.c |
30 | +++ b/include/hw/virtio/vhost.h | 42 | +++ b/block/block-backend.c |
31 | @@ -XXX,XX +XXX,XX @@ struct vhost_dev { | 43 | @@ -XXX,XX +XXX,XX @@ static void send_qmp_error_event(BlockBackend *blk, |
32 | int nvqs; | 44 | g_autofree char *path = blk_get_attached_dev_path(blk); |
33 | /* the first virtqueue which would be used by this vhost dev */ | 45 | |
34 | int vq_index; | 46 | optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE; |
35 | + /* if non-zero, minimum required value for max_queues */ | 47 | - qapi_event_send_block_io_error(blk_name(blk), path, |
36 | + int num_queues; | 48 | + qapi_event_send_block_io_error(path, blk_name(blk), |
37 | uint64_t features; | 49 | bs ? bdrv_get_node_name(bs) : NULL, optype, |
38 | uint64_t acked_features; | 50 | action, blk_iostatus_is_enabled(blk), |
39 | uint64_t backend_features; | 51 | error == ENOSPC, strerror(error)); |
40 | diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/hw/block/vhost-user-blk.c | ||
43 | +++ b/hw/block/vhost-user-blk.c | ||
44 | @@ -XXX,XX +XXX,XX @@ static int vhost_user_blk_connect(DeviceState *dev, Error **errp) | ||
45 | } | ||
46 | s->connected = true; | ||
47 | |||
48 | + s->dev.num_queues = s->num_queues; | ||
49 | s->dev.nvqs = s->num_queues; | ||
50 | s->dev.vqs = s->vhost_vqs; | ||
51 | s->dev.vq_index = 0; | ||
52 | diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/hw/virtio/vhost-user.c | ||
55 | +++ b/hw/virtio/vhost-user.c | ||
56 | @@ -XXX,XX +XXX,XX @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque) | ||
57 | return err; | ||
58 | } | ||
59 | } | ||
60 | + if (dev->num_queues && dev->max_queues < dev->num_queues) { | ||
61 | + error_report("The maximum number of queues supported by the " | ||
62 | + "backend is %" PRIu64, dev->max_queues); | ||
63 | + return -EINVAL; | ||
64 | + } | ||
65 | |||
66 | if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) && | ||
67 | !(virtio_has_feature(dev->protocol_features, | ||
68 | -- | 52 | -- |
69 | 2.30.2 | 53 | 2.48.1 |
70 | 54 | ||
New patch | |||
1 | This allows querying from QMP (and also HMP) whether an image is | ||
2 | currently active or inactive (in the sense of BDRV_O_INACTIVE). | ||
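As an illustration only (bdrv_is_inactive() is the helper added by this patch, while the caller below is made up), C code can now also branch on the node state directly:

    /* Caller must hold the block graph reader lock (GRAPH_RDLOCK). */
    static void print_node_state(BlockDriverState *bs)
    {
        printf("%s is %s\n", bdrv_get_node_name(bs),
               bdrv_is_inactive(bs) ? "inactive" : "active");
    }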
1 | 3 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
6 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Message-ID: <20250204211407.381505-2-kwolf@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | qapi/block-core.json | 6 +++++- | ||
12 | include/block/block-global-state.h | 3 +++ | ||
13 | block.c | 4 ++++ | ||
14 | block/monitor/block-hmp-cmds.c | 5 +++-- | ||
15 | block/qapi.c | 1 + | ||
16 | tests/qemu-iotests/184.out | 2 ++ | ||
17 | tests/qemu-iotests/191.out | 16 ++++++++++++++++ | ||
18 | tests/qemu-iotests/273.out | 5 +++++ | ||
19 | 8 files changed, 39 insertions(+), 3 deletions(-) | ||
20 | |||
21 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/qapi/block-core.json | ||
24 | +++ b/qapi/block-core.json | ||
25 | @@ -XXX,XX +XXX,XX @@ | ||
26 | # @backing_file_depth: number of files in the backing file chain | ||
27 | # (since: 1.2) | ||
28 | # | ||
29 | +# @active: true if the backend is active; typical cases for inactive backends | ||
30 | +# are on the migration source instance after migration completes and on the | ||
31 | +# destination before it completes. (since: 10.0) | ||
32 | +# | ||
33 | # @encrypted: true if the backing device is encrypted | ||
34 | # | ||
35 | # @detect_zeroes: detect and optimize zero writes (Since 2.1) | ||
36 | @@ -XXX,XX +XXX,XX @@ | ||
37 | { 'struct': 'BlockDeviceInfo', | ||
38 | 'data': { 'file': 'str', '*node-name': 'str', 'ro': 'bool', 'drv': 'str', | ||
39 | '*backing_file': 'str', 'backing_file_depth': 'int', | ||
40 | - 'encrypted': 'bool', | ||
41 | + 'active': 'bool', 'encrypted': 'bool', | ||
42 | 'detect_zeroes': 'BlockdevDetectZeroesOptions', | ||
43 | 'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int', | ||
44 | 'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int', | ||
45 | diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/include/block/block-global-state.h | ||
48 | +++ b/include/block/block-global-state.h | ||
49 | @@ -XXX,XX +XXX,XX @@ BlockDriverState * GRAPH_RDLOCK | ||
50 | check_to_replace_node(BlockDriverState *parent_bs, const char *node_name, | ||
51 | Error **errp); | ||
52 | |||
53 | + | ||
54 | +bool GRAPH_RDLOCK bdrv_is_inactive(BlockDriverState *bs); | ||
55 | + | ||
56 | int no_coroutine_fn GRAPH_RDLOCK | ||
57 | bdrv_activate(BlockDriverState *bs, Error **errp); | ||
58 | |||
59 | diff --git a/block.c b/block.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/block.c | ||
62 | +++ b/block.c | ||
63 | @@ -XXX,XX +XXX,XX @@ void bdrv_init_with_whitelist(void) | ||
64 | bdrv_init(); | ||
65 | } | ||
66 | |||
67 | +bool bdrv_is_inactive(BlockDriverState *bs) { | ||
68 | + return bs->open_flags & BDRV_O_INACTIVE; | ||
69 | +} | ||
70 | + | ||
71 | int bdrv_activate(BlockDriverState *bs, Error **errp) | ||
72 | { | ||
73 | BdrvChild *child, *parent; | ||
74 | diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c | ||
75 | index XXXXXXX..XXXXXXX 100644 | ||
76 | --- a/block/monitor/block-hmp-cmds.c | ||
77 | +++ b/block/monitor/block-hmp-cmds.c | ||
78 | @@ -XXX,XX +XXX,XX @@ static void print_block_info(Monitor *mon, BlockInfo *info, | ||
79 | } | ||
80 | |||
81 | if (inserted) { | ||
82 | - monitor_printf(mon, ": %s (%s%s%s)\n", | ||
83 | + monitor_printf(mon, ": %s (%s%s%s%s)\n", | ||
84 | inserted->file, | ||
85 | inserted->drv, | ||
86 | inserted->ro ? ", read-only" : "", | ||
87 | - inserted->encrypted ? ", encrypted" : ""); | ||
88 | + inserted->encrypted ? ", encrypted" : "", | ||
89 | + inserted->active ? "" : ", inactive"); | ||
90 | } else { | ||
91 | monitor_printf(mon, ": [not inserted]\n"); | ||
92 | } | ||
93 | diff --git a/block/qapi.c b/block/qapi.c | ||
94 | index XXXXXXX..XXXXXXX 100644 | ||
95 | --- a/block/qapi.c | ||
96 | +++ b/block/qapi.c | ||
97 | @@ -XXX,XX +XXX,XX @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, | ||
98 | info->file = g_strdup(bs->filename); | ||
99 | info->ro = bdrv_is_read_only(bs); | ||
100 | info->drv = g_strdup(bs->drv->format_name); | ||
101 | + info->active = !bdrv_is_inactive(bs); | ||
102 | info->encrypted = bs->encrypted; | ||
103 | |||
104 | info->cache = g_new(BlockdevCacheInfo, 1); | ||
105 | diff --git a/tests/qemu-iotests/184.out b/tests/qemu-iotests/184.out | ||
106 | index XXXXXXX..XXXXXXX 100644 | ||
107 | --- a/tests/qemu-iotests/184.out | ||
108 | +++ b/tests/qemu-iotests/184.out | ||
109 | @@ -XXX,XX +XXX,XX @@ Testing: | ||
110 | { | ||
111 | "iops_rd": 0, | ||
112 | "detect_zeroes": "off", | ||
113 | + "active": true, | ||
114 | "image": { | ||
115 | "backing-image": { | ||
116 | "virtual-size": 1073741824, | ||
117 | @@ -XXX,XX +XXX,XX @@ Testing: | ||
118 | { | ||
119 | "iops_rd": 0, | ||
120 | "detect_zeroes": "off", | ||
121 | + "active": true, | ||
122 | "image": { | ||
123 | "virtual-size": 1073741824, | ||
124 | "filename": "null-co://", | ||
125 | diff --git a/tests/qemu-iotests/191.out b/tests/qemu-iotests/191.out | ||
126 | index XXXXXXX..XXXXXXX 100644 | ||
127 | --- a/tests/qemu-iotests/191.out | ||
128 | +++ b/tests/qemu-iotests/191.out | ||
129 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
130 | { | ||
131 | "iops_rd": 0, | ||
132 | "detect_zeroes": "off", | ||
133 | + "active": true, | ||
134 | "image": { | ||
135 | "backing-image": { | ||
136 | "virtual-size": 67108864, | ||
137 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
138 | { | ||
139 | "iops_rd": 0, | ||
140 | "detect_zeroes": "off", | ||
141 | + "active": true, | ||
142 | "image": { | ||
143 | "virtual-size": 197120, | ||
144 | "filename": "TEST_DIR/t.IMGFMT.ovl2", | ||
145 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
146 | { | ||
147 | "iops_rd": 0, | ||
148 | "detect_zeroes": "off", | ||
149 | + "active": true, | ||
150 | "image": { | ||
151 | "backing-image": { | ||
152 | "virtual-size": 67108864, | ||
153 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
154 | { | ||
155 | "iops_rd": 0, | ||
156 | "detect_zeroes": "off", | ||
157 | + "active": true, | ||
158 | "image": { | ||
159 | "virtual-size": 197120, | ||
160 | "filename": "TEST_DIR/t.IMGFMT", | ||
161 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
162 | { | ||
163 | "iops_rd": 0, | ||
164 | "detect_zeroes": "off", | ||
165 | + "active": true, | ||
166 | "image": { | ||
167 | "backing-image": { | ||
168 | "virtual-size": 67108864, | ||
169 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
170 | { | ||
171 | "iops_rd": 0, | ||
172 | "detect_zeroes": "off", | ||
173 | + "active": true, | ||
174 | "image": { | ||
175 | "virtual-size": 393216, | ||
176 | "filename": "TEST_DIR/t.IMGFMT.mid", | ||
177 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
178 | { | ||
179 | "iops_rd": 0, | ||
180 | "detect_zeroes": "off", | ||
181 | + "active": true, | ||
182 | "image": { | ||
183 | "virtual-size": 67108864, | ||
184 | "filename": "TEST_DIR/t.IMGFMT.base", | ||
185 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
186 | { | ||
187 | "iops_rd": 0, | ||
188 | "detect_zeroes": "off", | ||
189 | + "active": true, | ||
190 | "image": { | ||
191 | "virtual-size": 393216, | ||
192 | "filename": "TEST_DIR/t.IMGFMT.base", | ||
193 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
194 | { | ||
195 | "iops_rd": 0, | ||
196 | "detect_zeroes": "off", | ||
197 | + "active": true, | ||
198 | "image": { | ||
199 | "backing-image": { | ||
200 | "virtual-size": 67108864, | ||
201 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
202 | { | ||
203 | "iops_rd": 0, | ||
204 | "detect_zeroes": "off", | ||
205 | + "active": true, | ||
206 | "image": { | ||
207 | "virtual-size": 197120, | ||
208 | "filename": "TEST_DIR/t.IMGFMT.ovl2", | ||
209 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
210 | { | ||
211 | "iops_rd": 0, | ||
212 | "detect_zeroes": "off", | ||
213 | + "active": true, | ||
214 | "image": { | ||
215 | "backing-image": { | ||
216 | "backing-image": { | ||
217 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
218 | { | ||
219 | "iops_rd": 0, | ||
220 | "detect_zeroes": "off", | ||
221 | + "active": true, | ||
222 | "image": { | ||
223 | "virtual-size": 197120, | ||
224 | "filename": "TEST_DIR/t.IMGFMT.ovl3", | ||
225 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
226 | { | ||
227 | "iops_rd": 0, | ||
228 | "detect_zeroes": "off", | ||
229 | + "active": true, | ||
230 | "image": { | ||
231 | "virtual-size": 67108864, | ||
232 | "filename": "TEST_DIR/t.IMGFMT.base", | ||
233 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
234 | { | ||
235 | "iops_rd": 0, | ||
236 | "detect_zeroes": "off", | ||
237 | + "active": true, | ||
238 | "image": { | ||
239 | "virtual-size": 393216, | ||
240 | "filename": "TEST_DIR/t.IMGFMT.base", | ||
241 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
242 | { | ||
243 | "iops_rd": 0, | ||
244 | "detect_zeroes": "off", | ||
245 | + "active": true, | ||
246 | "image": { | ||
247 | "backing-image": { | ||
248 | "virtual-size": 67108864, | ||
249 | @@ -XXX,XX +XXX,XX @@ wrote 65536/65536 bytes at offset 1048576 | ||
250 | { | ||
251 | "iops_rd": 0, | ||
252 | "detect_zeroes": "off", | ||
253 | + "active": true, | ||
254 | "image": { | ||
255 | "virtual-size": 197120, | ||
256 | "filename": "TEST_DIR/t.IMGFMT", | ||
257 | diff --git a/tests/qemu-iotests/273.out b/tests/qemu-iotests/273.out | ||
258 | index XXXXXXX..XXXXXXX 100644 | ||
259 | --- a/tests/qemu-iotests/273.out | ||
260 | +++ b/tests/qemu-iotests/273.out | ||
261 | @@ -XXX,XX +XXX,XX @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev | ||
262 | { | ||
263 | "iops_rd": 0, | ||
264 | "detect_zeroes": "off", | ||
265 | + "active": true, | ||
266 | "image": { | ||
267 | "backing-image": { | ||
268 | "backing-image": { | ||
269 | @@ -XXX,XX +XXX,XX @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev | ||
270 | { | ||
271 | "iops_rd": 0, | ||
272 | "detect_zeroes": "off", | ||
273 | + "active": true, | ||
274 | "image": { | ||
275 | "virtual-size": 197120, | ||
276 | "filename": "TEST_DIR/t.IMGFMT", | ||
277 | @@ -XXX,XX +XXX,XX @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev | ||
278 | { | ||
279 | "iops_rd": 0, | ||
280 | "detect_zeroes": "off", | ||
281 | + "active": true, | ||
282 | "image": { | ||
283 | "backing-image": { | ||
284 | "virtual-size": 197120, | ||
285 | @@ -XXX,XX +XXX,XX @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev | ||
286 | { | ||
287 | "iops_rd": 0, | ||
288 | "detect_zeroes": "off", | ||
289 | + "active": true, | ||
290 | "image": { | ||
291 | "virtual-size": 197120, | ||
292 | "filename": "TEST_DIR/t.IMGFMT.mid", | ||
293 | @@ -XXX,XX +XXX,XX @@ Testing: -blockdev file,node-name=base,filename=TEST_DIR/t.IMGFMT.base -blockdev | ||
294 | { | ||
295 | "iops_rd": 0, | ||
296 | "detect_zeroes": "off", | ||
297 | + "active": true, | ||
298 | "image": { | ||
299 | "virtual-size": 197120, | ||
300 | "filename": "TEST_DIR/t.IMGFMT.base", | ||
301 | -- | ||
302 | 2.48.1 |
1 | Now that vhost_user_blk_connect() is not called from an event handler | 1 | What we wanted to catch with the assertion is cases where the recursion |
---|---|---|---|
2 | any more, but directly from vhost_user_blk_device_realize(), we can | 2 | finds that a child was inactive before its parent. This should never |
3 | actually make use of Error again instead of calling error_report() in | 3 | happen. But if the user tries to inactivate an image that is already |
4 | the inner function and setting a more generic and therefore less useful | 4 | inactive, that's harmless and we don't want to fail the assertion. |
5 | error message in realize() itself. | ||
6 | |||
7 | With Error, the callers are responsible for adding context if necessary | ||
8 | (such as the "-device" option the error refers to). Additional prefixes | ||
9 | are redundant and better omitted. | ||
10 | 5 | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
12 | Acked-by: Raphael Norwitz <raphael.norwitz@nutanix.com> | 7 | Acked-by: Fabiano Rosas <farosas@suse.de> |
13 | Message-Id: <20210429171316.162022-4-kwolf@redhat.com> | 8 | Reviewed-by: Eric Blake <eblake@redhat.com> |
14 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
10 | Message-ID: <20250204211407.381505-3-kwolf@redhat.com> | ||
15 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
16 | --- | 12 | --- |
17 | hw/block/vhost-user-blk.c | 23 +++++++++++------------ | 13 | block.c | 16 ++++++++++++---- |
18 | 1 file changed, 11 insertions(+), 12 deletions(-) | 14 | 1 file changed, 12 insertions(+), 4 deletions(-) |
19 | 15 | ||
20 | diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c | 16 | diff --git a/block.c b/block.c |
21 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/hw/block/vhost-user-blk.c | 18 | --- a/block.c |
23 | +++ b/hw/block/vhost-user-blk.c | 19 | +++ b/block.c |
24 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_reset(VirtIODevice *vdev) | 20 | @@ -XXX,XX +XXX,XX @@ bdrv_has_bds_parent(BlockDriverState *bs, bool only_active) |
25 | vhost_dev_free_inflight(s->inflight); | 21 | return false; |
26 | } | 22 | } |
27 | 23 | ||
28 | -static int vhost_user_blk_connect(DeviceState *dev) | 24 | -static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) |
29 | +static int vhost_user_blk_connect(DeviceState *dev, Error **errp) | 25 | +static int GRAPH_RDLOCK |
26 | +bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level) | ||
30 | { | 27 | { |
31 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); | 28 | BdrvChild *child, *parent; |
32 | VHostUserBlk *s = VHOST_USER_BLK(vdev); | 29 | int ret; |
33 | @@ -XXX,XX +XXX,XX @@ static int vhost_user_blk_connect(DeviceState *dev) | 30 | @@ -XXX,XX +XXX,XX @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) |
34 | 31 | return 0; | |
35 | ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0); | ||
36 | if (ret < 0) { | ||
37 | - error_report("vhost-user-blk: vhost initialization failed: %s", | ||
38 | - strerror(-ret)); | ||
39 | + error_setg_errno(errp, -ret, "vhost initialization failed"); | ||
40 | return ret; | ||
41 | } | 32 | } |
42 | 33 | ||
43 | @@ -XXX,XX +XXX,XX @@ static int vhost_user_blk_connect(DeviceState *dev) | 34 | - assert(!(bs->open_flags & BDRV_O_INACTIVE)); |
44 | if (virtio_device_started(vdev, vdev->status)) { | 35 | + /* |
45 | ret = vhost_user_blk_start(vdev); | 36 | + * Inactivating an already inactive node on user request is harmless, but if |
37 | + * a child is already inactive before its parent, that's bad. | ||
38 | + */ | ||
39 | + if (bs->open_flags & BDRV_O_INACTIVE) { | ||
40 | + assert(top_level); | ||
41 | + return 0; | ||
42 | + } | ||
43 | |||
44 | /* Inactivate this node */ | ||
45 | if (bs->drv->bdrv_inactivate) { | ||
46 | @@ -XXX,XX +XXX,XX @@ static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs) | ||
47 | |||
48 | /* Recursively inactivate children */ | ||
49 | QLIST_FOREACH(child, &bs->children, next) { | ||
50 | - ret = bdrv_inactivate_recurse(child->bs); | ||
51 | + ret = bdrv_inactivate_recurse(child->bs, false); | ||
46 | if (ret < 0) { | 52 | if (ret < 0) { |
47 | - error_report("vhost-user-blk: vhost start failed: %s", | ||
48 | - strerror(-ret)); | ||
49 | + error_setg_errno(errp, -ret, "vhost start failed"); | ||
50 | return ret; | 53 | return ret; |
51 | } | 54 | } |
52 | } | 55 | @@ -XXX,XX +XXX,XX @@ int bdrv_inactivate_all(void) |
53 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) | 56 | if (bdrv_has_bds_parent(bs, false)) { |
54 | DeviceState *dev = opaque; | 57 | continue; |
55 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); | ||
56 | VHostUserBlk *s = VHOST_USER_BLK(vdev); | ||
57 | + Error *local_err = NULL; | ||
58 | |||
59 | switch (event) { | ||
60 | case CHR_EVENT_OPENED: | ||
61 | - if (vhost_user_blk_connect(dev) < 0) { | ||
62 | + if (vhost_user_blk_connect(dev, &local_err) < 0) { | ||
63 | + error_report_err(local_err); | ||
64 | qemu_chr_fe_disconnect(&s->chardev); | ||
65 | return; | ||
66 | } | 58 | } |
67 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) | 59 | - ret = bdrv_inactivate_recurse(bs); |
68 | int i, ret; | 60 | + ret = bdrv_inactivate_recurse(bs, true); |
69 | 61 | if (ret < 0) { | |
70 | if (!s->chardev.chr) { | 62 | bdrv_next_cleanup(&it); |
71 | - error_setg(errp, "vhost-user-blk: chardev is mandatory"); | 63 | break; |
72 | + error_setg(errp, "chardev is mandatory"); | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) | ||
77 | s->num_queues = 1; | ||
78 | } | ||
79 | if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) { | ||
80 | - error_setg(errp, "vhost-user-blk: invalid number of IO queues"); | ||
81 | + error_setg(errp, "invalid number of IO queues"); | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | if (!s->queue_size) { | ||
86 | - error_setg(errp, "vhost-user-blk: queue size must be non-zero"); | ||
87 | + error_setg(errp, "queue size must be non-zero"); | ||
88 | return; | ||
89 | } | ||
90 | if (s->queue_size > VIRTQUEUE_MAX_SIZE) { | ||
91 | - error_setg(errp, "vhost-user-blk: queue size must not exceed %d", | ||
92 | + error_setg(errp, "queue size must not exceed %d", | ||
93 | VIRTQUEUE_MAX_SIZE); | ||
94 | return; | ||
95 | } | ||
96 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) | ||
97 | goto virtio_err; | ||
98 | } | ||
99 | |||
100 | - if (vhost_user_blk_connect(dev) < 0) { | ||
101 | - error_setg(errp, "vhost-user-blk: could not connect"); | ||
102 | + if (vhost_user_blk_connect(dev, errp) < 0) { | ||
103 | qemu_chr_fe_disconnect(&s->chardev); | ||
104 | goto virtio_err; | ||
105 | } | ||
106 | -- | 64 | -- |
107 | 2.30.2 | 65 | 2.48.1 |
108 | |||
109 |
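
In user-visible terms, once the blockdev-set-active command added later in
this series is available, this change means that asking to inactivate a node
that is already inactive simply succeeds as a no-op instead of tripping the
assertion. A minimal QMP sketch (the node name "disk0" is made up for the
example):

  -> { "execute": "blockdev-set-active",
       "arguments": { "node-name": "disk0", "active": false } }
  <- { "return": {} }
  -> { "execute": "blockdev-set-active",
       "arguments": { "node-name": "disk0", "active": false } }
  <- { "return": {} }

Only an already inactive child underneath a parent that is still being
inactivated remains an internal error, which is what the assert(top_level)
above checks.
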
1 | Commit 2943b53f6 (' virtio: force VIRTIO_F_IOMMU_PLATFORM') made sure | 1 | Putting an active block node on top of an inactive one is strictly |
---|---|---|---|
2 | that vhost can't just reject VIRTIO_F_IOMMU_PLATFORM when it was | 2 | speaking an invalid configuration and the next patch will turn it into a |
3 | requested. However, just adding it back to the negotiated flags isn't | 3 | hard error. |
4 | right either because it promises support to the guest that the device | ||
5 | actually doesn't support. One example of a vhost-user device that | ||
6 | doesn't have support for the flag is the vhost-user-blk export of QEMU. | ||
7 | 4 | ||
8 | Instead of successfully creating a device that doesn't work, just fail | 5 | However, taking a snapshot while disk images are inactive after |
9 | to plug the device when it doesn't support the feature, but it was | 6 | completing migration has an important use case: After migrating to a |
10 | requested. This results in much clearer error messages. | 7 | file, taking an external snapshot is what is needed to take a full VM |
8 | snapshot. | ||
11 | 9 | ||
12 | Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1935019 | 10 | In order for this to keep working after the later patches, change |
11 | creating a snapshot such that it automatically inactivates an overlay | ||
12 | that is added on top of an already inactive node. | ||
13 | |||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
14 | Reviewed-by: Raphael Norwitz <raphael.norwitz@nutanix.com> | 15 | Acked-by: Fabiano Rosas <farosas@suse.de> |
15 | Message-Id: <20210429171316.162022-6-kwolf@redhat.com> | 16 | Reviewed-by: Eric Blake <eblake@redhat.com> |
16 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 17 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
18 | Message-ID: <20250204211407.381505-4-kwolf@redhat.com> | ||
17 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 19 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
18 | --- | 20 | --- |
19 | hw/virtio/virtio-bus.c | 5 +++++ | 21 | blockdev.c | 16 ++++++++++++++++ |
20 | 1 file changed, 5 insertions(+) | 22 | 1 file changed, 16 insertions(+) |
21 | 23 | ||
22 | diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c | 24 | diff --git a/blockdev.c b/blockdev.c |
23 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
24 | --- a/hw/virtio/virtio-bus.c | 26 | --- a/blockdev.c |
25 | +++ b/hw/virtio/virtio-bus.c | 27 | +++ b/blockdev.c |
26 | @@ -XXX,XX +XXX,XX @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) | 28 | @@ -XXX,XX +XXX,XX @@ static void external_snapshot_action(TransactionAction *action, |
27 | return; | 29 | return; |
28 | } | 30 | } |
29 | 31 | ||
30 | + if (has_iommu && !virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { | 32 | + /* |
31 | + error_setg(errp, "iommu_platform=true is not supported by the device"); | 33 | + * Older QEMU versions have allowed adding an active parent node to an |
32 | + return; | 34 | + * inactive child node. This is unsafe in the general case, but there is an |
35 | + * important use case, which is taking a VM snapshot with migration to file | ||
36 | + * and then adding an external snapshot while the VM is still stopped and | ||
37 | + * images are inactive. Requiring the user to explicitly create the overlay | ||
38 | + * as inactive would break compatibility, so just do it automatically here | ||
39 | + * to keep this working. | ||
40 | + */ | ||
41 | + if (bdrv_is_inactive(state->old_bs) && !bdrv_is_inactive(state->new_bs)) { | ||
42 | + ret = bdrv_inactivate(state->new_bs, errp); | ||
43 | + if (ret < 0) { | ||
44 | + return; | ||
45 | + } | ||
33 | + } | 46 | + } |
34 | + | 47 | + |
35 | if (klass->device_plugged != NULL) { | 48 | ret = bdrv_append(state->new_bs, state->old_bs, errp); |
36 | klass->device_plugged(qbus->parent, &local_err); | 49 | if (ret < 0) { |
37 | } | 50 | return; |
38 | -- | 51 | -- |
39 | 2.30.2 | 52 | 2.48.1 |
40 | |||
41 |
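
As a concrete sketch of the use case described above: after migrating a
stopped VM to a file, the images are still inactive, and a management tool can
take the external snapshot directly, for example with blockdev-snapshot-sync
(node names and the overlay filename are placeholders):

  -> { "execute": "blockdev-snapshot-sync",
       "arguments": { "node-name": "disk0",
                      "snapshot-file": "/tmp/overlay.qcow2",
                      "snapshot-node-name": "overlay0",
                      "format": "qcow2" } }
  <- { "return": {} }

With this patch, the new overlay is opened inactive automatically because its
backing node is still inactive, so the configuration stays valid once a later
patch turns active-on-top-of-inactive into a hard error.
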
1 | This is a partial revert of commits 77542d43149 and bc79c87bcde. | 1 | Block devices have an individual active state; a single global flag |
---|---|---|---|
2 | can't cover this correctly. This becomes more important as we allow | ||
3 | users to manually manage which nodes are active or inactive. | ||
2 | 4 | ||
3 | Usually, an error during initialisation means that the configuration was | 5 | Now that it's allowed to call bdrv_inactivate_all() even when some |
4 | wrong. Reconnecting won't make the error go away, but just turn the | 6 | nodes are already inactive, we can remove the flag and just |
5 | error condition into an endless loop. Avoid this and return errors | 7 | unconditionally call bdrv_inactivate_all() and, more importantly, |
6 | again. | 8 | bdrv_activate_all() before we make use of the nodes. |
7 | |||
8 | Additionally, calling vhost_user_blk_disconnect() from the chardev event | ||
9 | handler could result in use-after-free because none of the | ||
10 | initialisation code expects that the device could just go away in the | ||
11 | middle. So removing the call fixes crashes in several places. | ||
12 | |||
13 | For example, using a num-queues setting that is incompatible with the | ||
14 | backend would result in a crash like this (dereferencing dev->opaque, | ||
15 | which is already NULL): | ||
16 | |||
17 | #0 0x0000555555d0a4bd in vhost_user_read_cb (source=0x5555568f4690, condition=(G_IO_IN | G_IO_HUP), opaque=0x7fffffffcbf0) at ../hw/virtio/vhost-user.c:313 | ||
18 | #1 0x0000555555d950d3 in qio_channel_fd_source_dispatch (source=0x555557c3f750, callback=0x555555d0a478 <vhost_user_read_cb>, user_data=0x7fffffffcbf0) at ../io/channel-watch.c:84 | ||
19 | #2 0x00007ffff7b32a9f in g_main_context_dispatch () at /lib64/libglib-2.0.so.0 | ||
20 | #3 0x00007ffff7b84a98 in g_main_context_iterate.constprop () at /lib64/libglib-2.0.so.0 | ||
21 | #4 0x00007ffff7b32163 in g_main_loop_run () at /lib64/libglib-2.0.so.0 | ||
22 | #5 0x0000555555d0a724 in vhost_user_read (dev=0x555557bc62f8, msg=0x7fffffffcc50) at ../hw/virtio/vhost-user.c:402 | ||
23 | #6 0x0000555555d0ee6b in vhost_user_get_config (dev=0x555557bc62f8, config=0x555557bc62ac "", config_len=60) at ../hw/virtio/vhost-user.c:2133 | ||
24 | #7 0x0000555555d56d46 in vhost_dev_get_config (hdev=0x555557bc62f8, config=0x555557bc62ac "", config_len=60) at ../hw/virtio/vhost.c:1566 | ||
25 | #8 0x0000555555cdd150 in vhost_user_blk_device_realize (dev=0x555557bc60b0, errp=0x7fffffffcf90) at ../hw/block/vhost-user-blk.c:510 | ||
26 | #9 0x0000555555d08f6d in virtio_device_realize (dev=0x555557bc60b0, errp=0x7fffffffcff0) at ../hw/virtio/virtio.c:3660 | ||
27 | |||
28 | Note that this removes the ability to reconnect during initialisation | ||
29 | (but not during operation) when there is no permanent error, but the | ||
30 | backend restarts, as the implementation was buggy. This feature can be | ||
31 | added back in a follow-up series after changing error paths to | ||
32 | distinguish cases where retrying could help from cases with permanent | ||
33 | errors. | ||
34 | 9 | ||
35 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
36 | Message-Id: <20210429171316.162022-3-kwolf@redhat.com> | 11 | Acked-by: Fabiano Rosas <farosas@suse.de> |
37 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | 12 | Reviewed-by: Eric Blake <eblake@redhat.com> |
13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | Message-ID: <20250204211407.381505-5-kwolf@redhat.com> | ||
38 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 15 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
39 | --- | 16 | --- |
40 | hw/block/vhost-user-blk.c | 59 +++++++++++---------------------------- | 17 | migration/migration.h | 3 --- |
41 | 1 file changed, 17 insertions(+), 42 deletions(-) | 18 | migration/block-active.c | 46 ---------------------------------------- |
19 | migration/migration.c | 8 ------- | ||
20 | 3 files changed, 57 deletions(-) | ||
42 | 21 | ||
43 | diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c | 22 | diff --git a/migration/migration.h b/migration/migration.h |
44 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
45 | --- a/hw/block/vhost-user-blk.c | 24 | --- a/migration/migration.h |
46 | +++ b/hw/block/vhost-user-blk.c | 25 | +++ b/migration/migration.h |
47 | @@ -XXX,XX +XXX,XX @@ static const int user_feature_bits[] = { | 26 | @@ -XXX,XX +XXX,XX @@ void migration_bitmap_sync_precopy(bool last_stage); |
48 | VHOST_INVALID_FEATURE_BIT | 27 | void dirty_bitmap_mig_init(void); |
49 | }; | 28 | bool should_send_vmdesc(void); |
50 | 29 | ||
51 | +static void vhost_user_blk_event(void *opaque, QEMUChrEvent event); | 30 | -/* migration/block-active.c */ |
52 | + | 31 | -void migration_block_active_setup(bool active); |
53 | static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config) | ||
54 | { | ||
55 | VHostUserBlk *s = VHOST_USER_BLK(vdev); | ||
56 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_disconnect(DeviceState *dev) | ||
57 | vhost_dev_cleanup(&s->dev); | ||
58 | } | ||
59 | |||
60 | -static void vhost_user_blk_event(void *opaque, QEMUChrEvent event, | ||
61 | - bool realized); | ||
62 | - | 32 | - |
63 | -static void vhost_user_blk_event_realize(void *opaque, QEMUChrEvent event) | 33 | #endif |
34 | diff --git a/migration/block-active.c b/migration/block-active.c | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/migration/block-active.c | ||
37 | +++ b/migration/block-active.c | ||
38 | @@ -XXX,XX +XXX,XX @@ | ||
39 | #include "qemu/error-report.h" | ||
40 | #include "trace.h" | ||
41 | |||
42 | -/* | ||
43 | - * Migration-only cache to remember the block layer activation status. | ||
44 | - * Protected by BQL. | ||
45 | - * | ||
46 | - * We need this because.. | ||
47 | - * | ||
48 | - * - Migration can fail after block devices are invalidated (during | ||
49 | - * switchover phase). When that happens, we need to be able to recover | ||
50 | - * the block drive status by re-activating them. | ||
51 | - * | ||
52 | - * - Currently bdrv_inactivate_all() is not safe to be invoked on top of | ||
53 | - * invalidated drives (even if bdrv_activate_all() is actually safe to be | ||
54 | - * called any time!). It means remembering this could help migration to | ||
55 | - * make sure it won't invalidate twice in a row, crashing QEMU. It can | ||
56 | - * happen when we migrate a PAUSED VM from host1 to host2, then migrate | ||
57 | - * again to host3 without starting it. TODO: a cleaner solution is to | ||
58 | - * allow safe invoke of bdrv_inactivate_all() at anytime, like | ||
59 | - * bdrv_activate_all(). | ||
60 | - * | ||
61 | - * For freshly started QEMU, the flag is initialized to TRUE reflecting the | ||
62 | - * scenario where QEMU owns block device ownerships. | ||
63 | - * | ||
64 | - * For incoming QEMU taking a migration stream, the flag is initialized to | ||
65 | - * FALSE reflecting that the incoming side doesn't own the block devices, | ||
66 | - * not until switchover happens. | ||
67 | - */ | ||
68 | -static bool migration_block_active; | ||
69 | - | ||
70 | -/* Setup the disk activation status */ | ||
71 | -void migration_block_active_setup(bool active) | ||
64 | -{ | 72 | -{ |
65 | - vhost_user_blk_event(opaque, event, false); | 73 | - migration_block_active = active; |
66 | -} | 74 | -} |
67 | - | 75 | - |
68 | -static void vhost_user_blk_event_oper(void *opaque, QEMUChrEvent event) | 76 | bool migration_block_activate(Error **errp) |
69 | -{ | 77 | { |
70 | - vhost_user_blk_event(opaque, event, true); | 78 | ERRP_GUARD(); |
71 | -} | 79 | |
80 | assert(bql_locked()); | ||
81 | |||
82 | - if (migration_block_active) { | ||
83 | - trace_migration_block_activation("active-skipped"); | ||
84 | - return true; | ||
85 | - } | ||
72 | - | 86 | - |
73 | static void vhost_user_blk_chr_closed_bh(void *opaque) | 87 | trace_migration_block_activation("active"); |
74 | { | 88 | |
75 | DeviceState *dev = opaque; | 89 | bdrv_activate_all(errp); |
76 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_chr_closed_bh(void *opaque) | 90 | @@ -XXX,XX +XXX,XX @@ bool migration_block_activate(Error **errp) |
77 | VHostUserBlk *s = VHOST_USER_BLK(vdev); | 91 | return false; |
78 | 92 | } | |
79 | vhost_user_blk_disconnect(dev); | 93 | |
80 | - qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, | 94 | - migration_block_active = true; |
81 | - vhost_user_blk_event_oper, NULL, opaque, NULL, true); | 95 | return true; |
82 | + qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event, | ||
83 | + NULL, opaque, NULL, true); | ||
84 | } | 96 | } |
85 | 97 | ||
86 | -static void vhost_user_blk_event(void *opaque, QEMUChrEvent event, | 98 | @@ -XXX,XX +XXX,XX @@ bool migration_block_inactivate(void) |
87 | - bool realized) | 99 | |
88 | +static void vhost_user_blk_event(void *opaque, QEMUChrEvent event) | 100 | assert(bql_locked()); |
89 | { | 101 | |
90 | DeviceState *dev = opaque; | 102 | - if (!migration_block_active) { |
91 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); | 103 | - trace_migration_block_activation("inactive-skipped"); |
92 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event, | 104 | - return true; |
93 | } | 105 | - } |
94 | break; | ||
95 | case CHR_EVENT_CLOSED: | ||
96 | - /* | ||
97 | - * Closing the connection should happen differently on device | ||
98 | - * initialization and operation stages. | ||
99 | - * On initalization, we want to re-start vhost_dev initialization | ||
100 | - * from the very beginning right away when the connection is closed, | ||
101 | - * so we clean up vhost_dev on each connection closing. | ||
102 | - * On operation, we want to postpone vhost_dev cleanup to let the | ||
103 | - * other code perform its own cleanup sequence using vhost_dev data | ||
104 | - * (e.g. vhost_dev_set_log). | ||
105 | - */ | ||
106 | - if (realized && !runstate_check(RUN_STATE_SHUTDOWN)) { | ||
107 | + if (!runstate_check(RUN_STATE_SHUTDOWN)) { | ||
108 | /* | ||
109 | * A close event may happen during a read/write, but vhost | ||
110 | * code assumes the vhost_dev remains setup, so delay the | ||
111 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event, | ||
112 | * knowing its type (in this case vhost-user). | ||
113 | */ | ||
114 | s->dev.started = false; | ||
115 | - } else { | ||
116 | - vhost_user_blk_disconnect(dev); | ||
117 | } | ||
118 | break; | ||
119 | case CHR_EVENT_BREAK: | ||
120 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) | ||
121 | s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues); | ||
122 | s->connected = false; | ||
123 | |||
124 | - qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, | ||
125 | - vhost_user_blk_event_realize, NULL, (void *)dev, | ||
126 | - NULL, true); | ||
127 | - | 106 | - |
128 | -reconnect: | 107 | trace_migration_block_activation("inactive"); |
129 | if (qemu_chr_fe_wait_connected(&s->chardev, errp) < 0) { | 108 | |
130 | goto virtio_err; | 109 | ret = bdrv_inactivate_all(); |
110 | @@ -XXX,XX +XXX,XX @@ bool migration_block_inactivate(void) | ||
111 | return false; | ||
131 | } | 112 | } |
132 | 113 | ||
133 | - /* check whether vhost_user_blk_connect() failed or not */ | 114 | - migration_block_active = false; |
134 | - if (!s->connected) { | 115 | return true; |
135 | - goto reconnect; | 116 | } |
136 | + if (vhost_user_blk_connect(dev) < 0) { | 117 | diff --git a/migration/migration.c b/migration/migration.c |
137 | + error_setg(errp, "vhost-user-blk: could not connect"); | 118 | index XXXXXXX..XXXXXXX 100644 |
138 | + qemu_chr_fe_disconnect(&s->chardev); | 119 | --- a/migration/migration.c |
139 | + goto virtio_err; | 120 | +++ b/migration/migration.c |
121 | @@ -XXX,XX +XXX,XX @@ void qmp_migrate_incoming(const char *uri, bool has_channels, | ||
122 | return; | ||
140 | } | 123 | } |
141 | + assert(s->connected); | 124 | |
142 | 125 | - /* | |
143 | ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg, | 126 | - * Newly setup incoming QEMU. Mark the block active state to reflect |
144 | sizeof(struct virtio_blk_config)); | 127 | - * that the src currently owns the disks. |
145 | if (ret < 0) { | 128 | - */ |
146 | - error_report("vhost-user-blk: get block config failed"); | 129 | - migration_block_active_setup(false); |
147 | - goto reconnect; | 130 | - |
148 | + error_setg(errp, "vhost-user-blk: get block config failed"); | 131 | once = false; |
149 | + goto vhost_err; | 132 | } |
150 | } | 133 | |
151 | 134 | @@ -XXX,XX +XXX,XX @@ static void migration_instance_init(Object *obj) | |
152 | - /* we're fully initialized, now we can operate, so change the handler */ | 135 | ms->state = MIGRATION_STATUS_NONE; |
153 | + /* we're fully initialized, now we can operate, so add the handler */ | 136 | ms->mbps = -1; |
154 | qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, | 137 | ms->pages_per_second = -1; |
155 | - vhost_user_blk_event_oper, NULL, (void *)dev, | 138 | - /* Freshly started QEMU owns all the block devices */ |
156 | + vhost_user_blk_event, NULL, (void *)dev, | 139 | - migration_block_active_setup(true); |
157 | NULL, true); | 140 | qemu_sem_init(&ms->pause_sem, 0); |
158 | return; | 141 | qemu_mutex_init(&ms->error_mutex); |
159 | 142 | ||
160 | +vhost_err: | ||
161 | + vhost_dev_cleanup(&s->dev); | ||
162 | virtio_err: | ||
163 | g_free(s->vhost_vqs); | ||
164 | s->vhost_vqs = NULL; | ||
165 | -- | 143 | -- |
166 | 2.30.2 | 144 | 2.48.1 |
167 | |||
168 |
New patch | |||
---|---|---|---|
1 | An active node makes unrestricted use of its children and would possibly | ||
2 | run into assertion failures when it operates on an inactive child node. | ||
1 | 3 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
6 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Message-ID: <20250204211407.381505-6-kwolf@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | block.c | 5 +++++ | ||
12 | 1 file changed, 5 insertions(+) | ||
13 | |||
14 | diff --git a/block.c b/block.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block.c | ||
17 | +++ b/block.c | ||
18 | @@ -XXX,XX +XXX,XX @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs, | ||
19 | child_bs->node_name, child_name, parent_bs->node_name); | ||
20 | return NULL; | ||
21 | } | ||
22 | + if (bdrv_is_inactive(child_bs) && !bdrv_is_inactive(parent_bs)) { | ||
23 | + error_setg(errp, "Inactive '%s' can't be a %s child of active '%s'", | ||
24 | + child_bs->node_name, child_name, parent_bs->node_name); | ||
25 | + return NULL; | ||
26 | + } | ||
27 | |||
28 | bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm); | ||
29 | bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL, | ||
30 | -- | ||
31 | 2.48.1 |
1 | Like other error paths, this one needs to call tran_finalize() and clean | 1 | In order for block_resize to fail gracefully on an inactive node instead |
---|---|---|---|
2 | up the BlockReopenQueue, too. | 2 | of crashing with an assertion failure in bdrv_co_write_req_prepare() |
3 | (called from bdrv_co_truncate()), we need to check for inactive nodes | ||
4 | also when they are attached as a root node and make sure that | ||
5 | BLK_PERM_RESIZE isn't among the permissions allowed for inactive nodes. | ||
6 | To this effect, don't enumerate the permissions that are incompatible | ||
7 | with inactive nodes any more, but allow only BLK_PERM_CONSISTENT_READ | ||
8 | for them. | ||
3 | 9 | ||
4 | Fixes: CID 1452772 | ||
5 | Fixes: 72373e40fbc7e4218061a8211384db362d3e7348 | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
7 | Message-Id: <20210503110555.24001-3-kwolf@redhat.com> | 11 | Acked-by: Fabiano Rosas <farosas@suse.de> |
8 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 12 | Reviewed-by: Eric Blake <eblake@redhat.com> |
13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | Message-ID: <20250204211407.381505-7-kwolf@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 15 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 16 | --- |
11 | block.c | 2 +- | 17 | block.c | 7 +++++++ |
12 | 1 file changed, 1 insertion(+), 1 deletion(-) | 18 | block/block-backend.c | 2 +- |
19 | 2 files changed, 8 insertions(+), 1 deletion(-) | ||
13 | 20 | ||
14 | diff --git a/block.c b/block.c | 21 | diff --git a/block.c b/block.c |
15 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 23 | --- a/block.c |
17 | +++ b/block.c | 24 | +++ b/block.c |
18 | @@ -XXX,XX +XXX,XX @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) | 25 | @@ -XXX,XX +XXX,XX @@ bdrv_attach_child_common(BlockDriverState *child_bs, |
19 | ret = bdrv_flush(bs_entry->state.bs); | 26 | assert(child_class->get_parent_desc); |
20 | if (ret < 0) { | 27 | GLOBAL_STATE_CODE(); |
21 | error_setg_errno(errp, -ret, "Error flushing drive"); | 28 | |
22 | - goto cleanup; | 29 | + if (bdrv_is_inactive(child_bs) && (perm & ~BLK_PERM_CONSISTENT_READ)) { |
23 | + goto abort; | 30 | + g_autofree char *perm_names = bdrv_perm_names(perm); |
24 | } | 31 | + error_setg(errp, "Permission '%s' unavailable on inactive node", |
32 | + perm_names); | ||
33 | + return NULL; | ||
34 | + } | ||
35 | + | ||
36 | new_child = g_new(BdrvChild, 1); | ||
37 | *new_child = (BdrvChild) { | ||
38 | .bs = NULL, | ||
39 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/block/block-backend.c | ||
42 | +++ b/block/block-backend.c | ||
43 | @@ -XXX,XX +XXX,XX @@ static bool blk_can_inactivate(BlockBackend *blk) | ||
44 | * guest. For block job BBs that satisfy this, we can just allow | ||
45 | * it. This is the case for mirror job source, which is required | ||
46 | * by libvirt non-shared block migration. */ | ||
47 | - if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) { | ||
48 | + if (!(blk->perm & ~BLK_PERM_CONSISTENT_READ)) { | ||
49 | return true; | ||
25 | } | 50 | } |
26 | 51 | ||
27 | -- | 52 | -- |
28 | 2.30.2 | 53 | 2.48.1 |
29 | |||
30 |
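
For context, the request that used to trigger the crash is an ordinary resize
of an inactive node, e.g. (node name and size are made up):

  -> { "execute": "block_resize",
       "arguments": { "node-name": "disk0", "size": 10737418240 } }

Before this patch such a request could abort QEMU on the assertion in
bdrv_co_write_req_prepare(); with BLK_PERM_RESIZE no longer available on
inactive nodes it now fails with a regular QMP error instead.
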
New patch | |||
---|---|---|---|
1 | In QEMU, nodes are automatically created inactive while expecting an | ||
2 | incoming migration (i.e. RUN_STATE_INMIGRATE). In qemu-storage-daemon, | ||
3 | the notion of runstates doesn't exist. It also wouldn't necessarily make | ||
4 | sense to introduce it because a single daemon can serve multiple VMs | ||
5 | that can be in different states. | ||
1 | 6 | ||
7 | Therefore, allow the user to explicitly open images as inactive with a | ||
8 | new option. The default is as before: Nodes are usually active, except | ||
9 | when created during RUN_STATE_INMIGRATE. | ||
10 | |||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
13 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
14 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | Message-ID: <20250204211407.381505-8-kwolf@redhat.com> | ||
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
17 | --- | ||
18 | qapi/block-core.json | 6 ++++++ | ||
19 | include/block/block-common.h | 1 + | ||
20 | block.c | 9 +++++++++ | ||
21 | 3 files changed, 16 insertions(+) | ||
22 | |||
23 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/qapi/block-core.json | ||
26 | +++ b/qapi/block-core.json | ||
27 | @@ -XXX,XX +XXX,XX @@ | ||
28 | # | ||
29 | # @cache: cache-related options | ||
30 | # | ||
31 | +# @active: whether the block node should be activated (default: true). | ||
32 | +# Having inactive block nodes is useful primarily for migration because it | ||
33 | +# allows opening an image on the destination while the source is still | ||
34 | +# holding locks for it. (Since 10.0) | ||
35 | +# | ||
36 | # @read-only: whether the block device should be read-only (default: | ||
37 | # false). Note that some block drivers support only read-only | ||
38 | # access, either generally or in certain configurations. In this | ||
39 | @@ -XXX,XX +XXX,XX @@ | ||
40 | '*node-name': 'str', | ||
41 | '*discard': 'BlockdevDiscardOptions', | ||
42 | '*cache': 'BlockdevCacheOptions', | ||
43 | + '*active': 'bool', | ||
44 | '*read-only': 'bool', | ||
45 | '*auto-read-only': 'bool', | ||
46 | '*force-share': 'bool', | ||
47 | diff --git a/include/block/block-common.h b/include/block/block-common.h | ||
48 | index XXXXXXX..XXXXXXX 100644 | ||
49 | --- a/include/block/block-common.h | ||
50 | +++ b/include/block/block-common.h | ||
51 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
52 | #define BDRV_OPT_AUTO_READ_ONLY "auto-read-only" | ||
53 | #define BDRV_OPT_DISCARD "discard" | ||
54 | #define BDRV_OPT_FORCE_SHARE "force-share" | ||
55 | +#define BDRV_OPT_ACTIVE "active" | ||
56 | |||
57 | |||
58 | #define BDRV_SECTOR_BITS 9 | ||
59 | diff --git a/block.c b/block.c | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/block.c | ||
62 | +++ b/block.c | ||
63 | @@ -XXX,XX +XXX,XX @@ static void update_flags_from_options(int *flags, QemuOpts *opts) | ||
64 | if (qemu_opt_get_bool_del(opts, BDRV_OPT_AUTO_READ_ONLY, false)) { | ||
65 | *flags |= BDRV_O_AUTO_RDONLY; | ||
66 | } | ||
67 | + | ||
68 | + if (!qemu_opt_get_bool_del(opts, BDRV_OPT_ACTIVE, true)) { | ||
69 | + *flags |= BDRV_O_INACTIVE; | ||
70 | + } | ||
71 | } | ||
72 | |||
73 | static void update_options_from_flags(QDict *options, int flags) | ||
74 | @@ -XXX,XX +XXX,XX @@ QemuOptsList bdrv_runtime_opts = { | ||
75 | .type = QEMU_OPT_BOOL, | ||
76 | .help = "Ignore flush requests", | ||
77 | }, | ||
78 | + { | ||
79 | + .name = BDRV_OPT_ACTIVE, | ||
80 | + .type = QEMU_OPT_BOOL, | ||
81 | + .help = "Node is activated", | ||
82 | + }, | ||
83 | { | ||
84 | .name = BDRV_OPT_READ_ONLY, | ||
85 | .type = QEMU_OPT_BOOL, | ||
86 | -- | ||
87 | 2.48.1 |
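
To illustrate the new option: a client of qemu-storage-daemon (or of QEMU
itself) can now open an image as inactive at blockdev-add time; driver, node
name and filename below are only placeholders:

  -> { "execute": "blockdev-add",
       "arguments": { "driver": "qcow2", "node-name": "disk0", "active": false,
                      "file": { "driver": "file",
                                "filename": "/path/to/image.qcow2" } } }
  <- { "return": {} }

The node then stays inactive until it is explicitly activated, for example
with the blockdev-set-active command introduced in the next patch.
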
1 | From: Coiby Xu <coiby.xu@gmail.com> | 1 | The system emulator tries to automatically activate and inactivate block |
---|---|---|---|
2 | nodes at the right point during migration. However, there are still | ||
3 | cases where it's necessary that the user can do this manually. | ||
2 | 4 | ||
3 | This test case has the same tests as tests/virtio-blk-test.c except for | 5 | Images are only activated on the destination VM of a migration when the |
4 | tests have block_resize. Since the vhost-user-blk export only serves one | 6 | VM is actually resumed. If the VM was paused, this doesn't happen |
5 | client one time, two exports are started by qemu-storage-daemon for the | 7 | automatically. The user may want to perform some operation on a block |
6 | hotplug test. | 8 | device (e.g. taking a snapshot or starting a block job) without also |
9 | resuming the VM yet. This is an example where a manual command is | ||
10 | necessary. | ||
7 | 11 | ||
8 | Suggested-by: Thomas Huth <thuth@redhat.com> | 12 | Another example is VM migration when the image files are opened by an |
9 | Signed-off-by: Coiby Xu <coiby.xu@gmail.com> | 13 | external qemu-storage-daemon instance on each side. In this case, the |
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 14 | process that needs to hand over the images isn't even part of the |
11 | Message-Id: <20210309094106.196911-3-stefanha@redhat.com> | 15 | migration and can't know when the migration completes. Management tools |
16 | need a way to explicitly inactivate images on the source and activate | ||
17 | them on the destination. | ||
18 | |||
19 | This adds a new blockdev-set-active QMP command that lets the user | ||
20 | change the status of individual nodes (this is necessary in | ||
21 | qemu-storage-daemon because it could be serving multiple VMs and only | ||
22 | one of them migrates at a time). For convenience, operating on all | ||
23 | devices (like QEMU does automatically during migration) is offered as an | ||
24 | option, too, and can be used in the context of single VM. | ||
25 | |||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 26 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
13 | Message-Id: <20210322092327.150720-2-stefanha@redhat.com> | 27 | Acked-by: Fabiano Rosas <farosas@suse.de> |
28 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
29 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
30 | Message-ID: <20250204211407.381505-9-kwolf@redhat.com> | ||
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 31 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
15 | --- | 32 | --- |
16 | tests/qtest/libqos/vhost-user-blk.h | 48 ++ | 33 | qapi/block-core.json | 32 ++++++++++++++++++++++++++++++ |
17 | tests/qtest/libqos/vhost-user-blk.c | 130 +++++ | 34 | include/block/block-global-state.h | 3 +++ |
18 | tests/qtest/vhost-user-blk-test.c | 794 ++++++++++++++++++++++++++++ | 35 | block.c | 21 ++++++++++++++++++++ |
19 | MAINTAINERS | 2 + | 36 | blockdev.c | 32 ++++++++++++++++++++++++++++++ |
20 | tests/qtest/libqos/meson.build | 1 + | 37 | 4 files changed, 88 insertions(+) |
21 | tests/qtest/meson.build | 4 + | ||
22 | 6 files changed, 979 insertions(+) | ||
23 | create mode 100644 tests/qtest/libqos/vhost-user-blk.h | ||
24 | create mode 100644 tests/qtest/libqos/vhost-user-blk.c | ||
25 | create mode 100644 tests/qtest/vhost-user-blk-test.c | ||
26 | 38 | ||
27 | diff --git a/tests/qtest/libqos/vhost-user-blk.h b/tests/qtest/libqos/vhost-user-blk.h | 39 | diff --git a/qapi/block-core.json b/qapi/block-core.json |
28 | new file mode 100644 | 40 | index XXXXXXX..XXXXXXX 100644 |
29 | index XXXXXXX..XXXXXXX | 41 | --- a/qapi/block-core.json |
30 | --- /dev/null | 42 | +++ b/qapi/block-core.json |
31 | +++ b/tests/qtest/libqos/vhost-user-blk.h | ||
32 | @@ -XXX,XX +XXX,XX @@ | 43 | @@ -XXX,XX +XXX,XX @@ |
33 | +/* | 44 | { 'command': 'blockdev-del', 'data': { 'node-name': 'str' }, |
34 | + * libqos driver framework | 45 | 'allow-preconfig': true } |
35 | + * | 46 | |
36 | + * Based on tests/qtest/libqos/virtio-blk.c | 47 | +## |
37 | + * | 48 | +# @blockdev-set-active: |
38 | + * Copyright (c) 2020 Coiby Xu <coiby.xu@gmail.com> | 49 | +# |
39 | + * | 50 | +# Activate or inactivate a block device. Use this to manage the handover of |
40 | + * Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com> | 51 | +# block devices on migration with qemu-storage-daemon. |
41 | + * | 52 | +# |
42 | + * This library is free software; you can redistribute it and/or | 53 | +# Activating a node automatically activates all of its child nodes first. |
43 | + * modify it under the terms of the GNU Lesser General Public | 54 | +# Inactivating a node automatically inactivates any of its child nodes that are |
44 | + * License version 2 as published by the Free Software Foundation. | 55 | +# not in use by a still active node. |
45 | + * | 56 | +# |
46 | + * This library is distributed in the hope that it will be useful, | 57 | +# @node-name: Name of the graph node to activate or inactivate. By default, all |
47 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | 58 | +# nodes are affected by the operation. |
48 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 59 | +# |
49 | + * Lesser General Public License for more details. | 60 | +# @active: true if the nodes should be active when the command returns success, |
50 | + * | 61 | +# false if they should be inactive. |
51 | + * You should have received a copy of the GNU Lesser General Public | 62 | +# |
52 | + * License along with this library; if not, see <http://www.gnu.org/licenses/> | 63 | +# Since: 10.0 |
53 | + */ | 64 | +# |
65 | +# .. qmp-example:: | ||
66 | +# | ||
67 | +# -> { "execute": "blockdev-set-active", | ||
68 | +# "arguments": { | ||
69 | +# "node-name": "node0", | ||
70 | +# "active": false | ||
71 | +# } | ||
72 | +# } | ||
73 | +# <- { "return": {} } | ||
74 | +## | ||
75 | +{ 'command': 'blockdev-set-active', | ||
76 | + 'data': { '*node-name': 'str', 'active': 'bool' }, | ||
77 | + 'allow-preconfig': true } | ||
54 | + | 78 | + |
55 | +#ifndef TESTS_LIBQOS_VHOST_USER_BLK_H | 79 | ## |
56 | +#define TESTS_LIBQOS_VHOST_USER_BLK_H | 80 | # @BlockdevCreateOptionsFile: |
81 | # | ||
82 | diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h | ||
83 | index XXXXXXX..XXXXXXX 100644 | ||
84 | --- a/include/block/block-global-state.h | ||
85 | +++ b/include/block/block-global-state.h | ||
86 | @@ -XXX,XX +XXX,XX @@ bdrv_activate(BlockDriverState *bs, Error **errp); | ||
87 | int coroutine_fn no_co_wrapper_bdrv_rdlock | ||
88 | bdrv_co_activate(BlockDriverState *bs, Error **errp); | ||
89 | |||
90 | +int no_coroutine_fn | ||
91 | +bdrv_inactivate(BlockDriverState *bs, Error **errp); | ||
57 | + | 92 | + |
58 | +#include "qgraph.h" | 93 | void bdrv_activate_all(Error **errp); |
59 | +#include "virtio.h" | 94 | int bdrv_inactivate_all(void); |
60 | +#include "virtio-pci.h" | 95 | |
96 | diff --git a/block.c b/block.c | ||
97 | index XXXXXXX..XXXXXXX 100644 | ||
98 | --- a/block.c | ||
99 | +++ b/block.c | ||
100 | @@ -XXX,XX +XXX,XX @@ bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level) | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | +int bdrv_inactivate(BlockDriverState *bs, Error **errp) | ||
105 | +{ | ||
106 | + int ret; | ||
61 | + | 107 | + |
62 | +typedef struct QVhostUserBlk QVhostUserBlk; | 108 | + GLOBAL_STATE_CODE(); |
63 | +typedef struct QVhostUserBlkPCI QVhostUserBlkPCI; | 109 | + GRAPH_RDLOCK_GUARD_MAINLOOP(); |
64 | +typedef struct QVhostUserBlkDevice QVhostUserBlkDevice; | ||
65 | + | 110 | + |
66 | +struct QVhostUserBlk { | 111 | + if (bdrv_has_bds_parent(bs, true)) { |
67 | + QVirtioDevice *vdev; | 112 | + error_setg(errp, "Node has active parent node"); |
68 | +}; | 113 | + return -EPERM; |
69 | + | ||
70 | +struct QVhostUserBlkPCI { | ||
71 | + QVirtioPCIDevice pci_vdev; | ||
72 | + QVhostUserBlk blk; | ||
73 | +}; | ||
74 | + | ||
75 | +struct QVhostUserBlkDevice { | ||
76 | + QOSGraphObject obj; | ||
77 | + QVhostUserBlk blk; | ||
78 | +}; | ||
79 | + | ||
80 | +#endif | ||
81 | diff --git a/tests/qtest/libqos/vhost-user-blk.c b/tests/qtest/libqos/vhost-user-blk.c | ||
82 | new file mode 100644 | ||
83 | index XXXXXXX..XXXXXXX | ||
84 | --- /dev/null | ||
85 | +++ b/tests/qtest/libqos/vhost-user-blk.c | ||
86 | @@ -XXX,XX +XXX,XX @@ | ||
87 | +/* | ||
88 | + * libqos driver framework | ||
89 | + * | ||
90 | + * Based on tests/qtest/libqos/virtio-blk.c | ||
91 | + * | ||
92 | + * Copyright (c) 2020 Coiby Xu <coiby.xu@gmail.com> | ||
93 | + * | ||
94 | + * Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com> | ||
95 | + * | ||
96 | + * This library is free software; you can redistribute it and/or | ||
97 | + * modify it under the terms of the GNU Lesser General Public | ||
98 | + * License version 2.1 as published by the Free Software Foundation. | ||
99 | + * | ||
100 | + * This library is distributed in the hope that it will be useful, | ||
101 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
102 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
103 | + * Lesser General Public License for more details. | ||
104 | + * | ||
105 | + * You should have received a copy of the GNU Lesser General Public | ||
106 | + * License along with this library; if not, see <http://www.gnu.org/licenses/> | ||
107 | + */ | ||
108 | + | ||
109 | +#include "qemu/osdep.h" | ||
110 | +#include "libqtest.h" | ||
111 | +#include "qemu/module.h" | ||
112 | +#include "standard-headers/linux/virtio_blk.h" | ||
113 | +#include "vhost-user-blk.h" | ||
114 | + | ||
115 | +#define PCI_SLOT 0x04 | ||
116 | +#define PCI_FN 0x00 | ||
117 | + | ||
118 | +/* virtio-blk-device */ | ||
119 | +static void *qvhost_user_blk_get_driver(QVhostUserBlk *v_blk, | ||
120 | + const char *interface) | ||
121 | +{ | ||
122 | + if (!g_strcmp0(interface, "vhost-user-blk")) { | ||
123 | + return v_blk; | ||
124 | + } | ||
125 | + if (!g_strcmp0(interface, "virtio")) { | ||
126 | + return v_blk->vdev; | ||
127 | + } | 114 | + } |
128 | + | 115 | + |
129 | + fprintf(stderr, "%s not present in vhost-user-blk-device\n", interface); | 116 | + ret = bdrv_inactivate_recurse(bs, true); |
130 | + g_assert_not_reached(); | 117 | + if (ret < 0) { |
118 | + error_setg_errno(errp, -ret, "Failed to inactivate node"); | ||
119 | + return ret; | ||
120 | + } | ||
121 | + | ||
122 | + return 0; | ||
131 | +} | 123 | +} |
132 | + | 124 | + |
133 | +static void *qvhost_user_blk_device_get_driver(void *object, | 125 | int bdrv_inactivate_all(void) |
134 | + const char *interface) | 126 | { |
127 | BlockDriverState *bs = NULL; | ||
128 | diff --git a/blockdev.c b/blockdev.c | ||
129 | index XXXXXXX..XXXXXXX 100644 | ||
130 | --- a/blockdev.c | ||
131 | +++ b/blockdev.c | ||
132 | @@ -XXX,XX +XXX,XX @@ void qmp_blockdev_del(const char *node_name, Error **errp) | ||
133 | bdrv_unref(bs); | ||
134 | } | ||
135 | |||
136 | +void qmp_blockdev_set_active(const char *node_name, bool active, Error **errp) | ||
135 | +{ | 137 | +{ |
136 | + QVhostUserBlkDevice *v_blk = object; | 138 | + int ret; |
137 | + return qvhost_user_blk_get_driver(&v_blk->blk, interface); | ||
138 | +} | ||
139 | + | 139 | + |
140 | +static void *vhost_user_blk_device_create(void *virtio_dev, | 140 | + GLOBAL_STATE_CODE(); |
141 | + QGuestAllocator *t_alloc, | 141 | + GRAPH_RDLOCK_GUARD_MAINLOOP(); |
142 | + void *addr) | ||
143 | +{ | ||
144 | + QVhostUserBlkDevice *vhost_user_blk = g_new0(QVhostUserBlkDevice, 1); | ||
145 | + QVhostUserBlk *interface = &vhost_user_blk->blk; | ||
146 | + | 142 | + |
147 | + interface->vdev = virtio_dev; | 143 | + if (!node_name) { |
144 | + if (active) { | ||
145 | + bdrv_activate_all(errp); | ||
146 | + } else { | ||
147 | + ret = bdrv_inactivate_all(); | ||
148 | + if (ret < 0) { | ||
149 | + error_setg_errno(errp, -ret, "Failed to inactivate all nodes"); | ||
150 | + } | ||
151 | + } | ||
152 | + } else { | ||
153 | + BlockDriverState *bs = bdrv_find_node(node_name); | ||
154 | + if (!bs) { | ||
155 | + error_setg(errp, "Failed to find node with node-name='%s'", | ||
156 | + node_name); | ||
157 | + return; | ||
158 | + } | ||
148 | + | 159 | + |
149 | + vhost_user_blk->obj.get_driver = qvhost_user_blk_device_get_driver; | 160 | + if (active) { |
150 | + | 161 | + bdrv_activate(bs, errp); |
151 | + return &vhost_user_blk->obj; | 162 | + } else { |
152 | +} | 163 | + bdrv_inactivate(bs, errp); |
153 | + | 164 | + } |
154 | +/* virtio-blk-pci */ | ||
155 | +static void *qvhost_user_blk_pci_get_driver(void *object, const char *interface) | ||
156 | +{ | ||
157 | + QVhostUserBlkPCI *v_blk = object; | ||
158 | + if (!g_strcmp0(interface, "pci-device")) { | ||
159 | + return v_blk->pci_vdev.pdev; | ||
160 | + } | ||
161 | + return qvhost_user_blk_get_driver(&v_blk->blk, interface); | ||
162 | +} | ||
163 | + | ||
164 | +static void *vhost_user_blk_pci_create(void *pci_bus, QGuestAllocator *t_alloc, | ||
165 | + void *addr) | ||
166 | +{ | ||
167 | + QVhostUserBlkPCI *vhost_user_blk = g_new0(QVhostUserBlkPCI, 1); | ||
168 | + QVhostUserBlk *interface = &vhost_user_blk->blk; | ||
169 | + QOSGraphObject *obj = &vhost_user_blk->pci_vdev.obj; | ||
170 | + | ||
171 | + virtio_pci_init(&vhost_user_blk->pci_vdev, pci_bus, addr); | ||
172 | + interface->vdev = &vhost_user_blk->pci_vdev.vdev; | ||
173 | + | ||
174 | + g_assert_cmphex(interface->vdev->device_type, ==, VIRTIO_ID_BLOCK); | ||
175 | + | ||
176 | + obj->get_driver = qvhost_user_blk_pci_get_driver; | ||
177 | + | ||
178 | + return obj; | ||
179 | +} | ||
180 | + | ||
181 | +static void vhost_user_blk_register_nodes(void) | ||
182 | +{ | ||
183 | + /* | ||
184 | + * FIXME: every test using these two nodes needs to setup a | ||
185 | + * -drive,id=drive0 otherwise QEMU is not going to start. | ||
186 | + * Therefore, we do not include "produces" edge for virtio | ||
187 | + * and pci-device yet. | ||
188 | + */ | ||
189 | + | ||
190 | + char *arg = g_strdup_printf("id=drv0,chardev=char1,addr=%x.%x", | ||
191 | + PCI_SLOT, PCI_FN); | ||
192 | + | ||
193 | + QPCIAddress addr = { | ||
194 | + .devfn = QPCI_DEVFN(PCI_SLOT, PCI_FN), | ||
195 | + }; | ||
196 | + | ||
197 | + QOSGraphEdgeOptions opts = { }; | ||
198 | + | ||
199 | + /* virtio-blk-device */ | ||
200 | + /** opts.extra_device_opts = "drive=drive0"; */ | ||
201 | + qos_node_create_driver("vhost-user-blk-device", | ||
202 | + vhost_user_blk_device_create); | ||
203 | + qos_node_consumes("vhost-user-blk-device", "virtio-bus", &opts); | ||
204 | + qos_node_produces("vhost-user-blk-device", "vhost-user-blk"); | ||
205 | + | ||
206 | + /* virtio-blk-pci */ | ||
207 | + opts.extra_device_opts = arg; | ||
208 | + add_qpci_address(&opts, &addr); | ||
209 | + qos_node_create_driver("vhost-user-blk-pci", vhost_user_blk_pci_create); | ||
210 | + qos_node_consumes("vhost-user-blk-pci", "pci-bus", &opts); | ||
211 | + qos_node_produces("vhost-user-blk-pci", "vhost-user-blk"); | ||
212 | + | ||
213 | + g_free(arg); | ||
214 | +} | ||
215 | + | ||
216 | +libqos_init(vhost_user_blk_register_nodes); | ||
217 | diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c | ||
218 | new file mode 100644 | ||
219 | index XXXXXXX..XXXXXXX | ||
220 | --- /dev/null | ||
221 | +++ b/tests/qtest/vhost-user-blk-test.c | ||
222 | @@ -XXX,XX +XXX,XX @@ | ||
223 | +/* | ||
224 | + * QTest testcase for Vhost-user Block Device | ||
225 | + * | ||
226 | + * Based on tests/qtest//virtio-blk-test.c | ||
227 | + | ||
228 | + * Copyright (c) 2014 SUSE LINUX Products GmbH | ||
229 | + * Copyright (c) 2014 Marc Marí | ||
230 | + * Copyright (c) 2020 Coiby Xu | ||
231 | + * | ||
232 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
233 | + * See the COPYING file in the top-level directory. | ||
234 | + */ | ||
235 | + | ||
236 | +#include "qemu/osdep.h" | ||
237 | +#include "libqtest-single.h" | ||
238 | +#include "qemu/bswap.h" | ||
239 | +#include "qemu/module.h" | ||
240 | +#include "standard-headers/linux/virtio_blk.h" | ||
241 | +#include "standard-headers/linux/virtio_pci.h" | ||
242 | +#include "libqos/qgraph.h" | ||
243 | +#include "libqos/vhost-user-blk.h" | ||
244 | +#include "libqos/libqos-pc.h" | ||
245 | + | ||
246 | +#define TEST_IMAGE_SIZE (64 * 1024 * 1024) | ||
247 | +#define QVIRTIO_BLK_TIMEOUT_US (30 * 1000 * 1000) | ||
248 | +#define PCI_SLOT_HP 0x06 | ||
249 | + | ||
250 | +typedef struct { | ||
251 | + pid_t pid; | ||
252 | +} QemuStorageDaemonState; | ||
253 | + | ||
254 | +typedef struct QVirtioBlkReq { | ||
255 | + uint32_t type; | ||
256 | + uint32_t ioprio; | ||
257 | + uint64_t sector; | ||
258 | + char *data; | ||
259 | + uint8_t status; | ||
260 | +} QVirtioBlkReq; | ||
261 | + | ||
262 | +#ifdef HOST_WORDS_BIGENDIAN | ||
263 | +static const bool host_is_big_endian = true; | ||
264 | +#else | ||
265 | +static const bool host_is_big_endian; /* false */ | ||
266 | +#endif | ||
267 | + | ||
268 | +static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req) | ||
269 | +{ | ||
270 | + if (qvirtio_is_big_endian(d) != host_is_big_endian) { | ||
271 | + req->type = bswap32(req->type); | ||
272 | + req->ioprio = bswap32(req->ioprio); | ||
273 | + req->sector = bswap64(req->sector); | ||
274 | + } | 165 | + } |
275 | +} | 166 | +} |
276 | + | 167 | + |
277 | +static inline void virtio_blk_fix_dwz_hdr(QVirtioDevice *d, | 168 | static BdrvChild * GRAPH_RDLOCK |
278 | + struct virtio_blk_discard_write_zeroes *dwz_hdr) | 169 | bdrv_find_child(BlockDriverState *parent_bs, const char *child_name) |
279 | +{ | 170 | { |
280 | + if (qvirtio_is_big_endian(d) != host_is_big_endian) { | ||
281 | + dwz_hdr->sector = bswap64(dwz_hdr->sector); | ||
282 | + dwz_hdr->num_sectors = bswap32(dwz_hdr->num_sectors); | ||
283 | + dwz_hdr->flags = bswap32(dwz_hdr->flags); | ||
284 | + } | ||
285 | +} | ||
286 | + | ||
287 | +static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d, | ||
288 | + QVirtioBlkReq *req, uint64_t data_size) | ||
289 | +{ | ||
290 | + uint64_t addr; | ||
291 | + uint8_t status = 0xFF; | ||
292 | + QTestState *qts = global_qtest; | ||
293 | + | ||
294 | + switch (req->type) { | ||
295 | + case VIRTIO_BLK_T_IN: | ||
296 | + case VIRTIO_BLK_T_OUT: | ||
297 | + g_assert_cmpuint(data_size % 512, ==, 0); | ||
298 | + break; | ||
299 | + case VIRTIO_BLK_T_DISCARD: | ||
300 | + case VIRTIO_BLK_T_WRITE_ZEROES: | ||
301 | + g_assert_cmpuint(data_size % | ||
302 | + sizeof(struct virtio_blk_discard_write_zeroes), ==, 0); | ||
303 | + break; | ||
304 | + default: | ||
305 | + g_assert_cmpuint(data_size, ==, 0); | ||
306 | + } | ||
307 | + | ||
308 | + addr = guest_alloc(alloc, sizeof(*req) + data_size); | ||
309 | + | ||
310 | + virtio_blk_fix_request(d, req); | ||
311 | + | ||
312 | + qtest_memwrite(qts, addr, req, 16); | ||
313 | + qtest_memwrite(qts, addr + 16, req->data, data_size); | ||
314 | + qtest_memwrite(qts, addr + 16 + data_size, &status, sizeof(status)); | ||
315 | + | ||
316 | + return addr; | ||
317 | +} | ||
318 | + | ||
319 | +/* Returns the request virtqueue so the caller can perform further tests */ | ||
320 | +static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc) | ||
321 | +{ | ||
322 | + QVirtioBlkReq req; | ||
323 | + uint64_t req_addr; | ||
324 | + uint64_t capacity; | ||
325 | + uint64_t features; | ||
326 | + uint32_t free_head; | ||
327 | + uint8_t status; | ||
328 | + char *data; | ||
329 | + QTestState *qts = global_qtest; | ||
330 | + QVirtQueue *vq; | ||
331 | + | ||
332 | + features = qvirtio_get_features(dev); | ||
333 | + features = features & ~(QVIRTIO_F_BAD_FEATURE | | ||
334 | + (1u << VIRTIO_RING_F_INDIRECT_DESC) | | ||
335 | + (1u << VIRTIO_RING_F_EVENT_IDX) | | ||
336 | + (1u << VIRTIO_BLK_F_SCSI)); | ||
337 | + qvirtio_set_features(dev, features); | ||
338 | + | ||
339 | + capacity = qvirtio_config_readq(dev, 0); | ||
340 | + g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); | ||
341 | + | ||
342 | + vq = qvirtqueue_setup(dev, alloc, 0); | ||
343 | + | ||
344 | + qvirtio_set_driver_ok(dev); | ||
345 | + | ||
346 | + /* Write and read with 3 descriptor layout */ | ||
347 | + /* Write request */ | ||
348 | + req.type = VIRTIO_BLK_T_OUT; | ||
349 | + req.ioprio = 1; | ||
350 | + req.sector = 0; | ||
351 | + req.data = g_malloc0(512); | ||
352 | + strcpy(req.data, "TEST"); | ||
353 | + | ||
354 | + req_addr = virtio_blk_request(alloc, dev, &req, 512); | ||
355 | + | ||
356 | + g_free(req.data); | ||
357 | + | ||
358 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
359 | + qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true); | ||
360 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
361 | + | ||
362 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
363 | + | ||
364 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
365 | + QVIRTIO_BLK_TIMEOUT_US); | ||
366 | + status = readb(req_addr + 528); | ||
367 | + g_assert_cmpint(status, ==, 0); | ||
368 | + | ||
369 | + guest_free(alloc, req_addr); | ||
370 | + | ||
371 | + /* Read request */ | ||
372 | + req.type = VIRTIO_BLK_T_IN; | ||
373 | + req.ioprio = 1; | ||
374 | + req.sector = 0; | ||
375 | + req.data = g_malloc0(512); | ||
376 | + | ||
377 | + req_addr = virtio_blk_request(alloc, dev, &req, 512); | ||
378 | + | ||
379 | + g_free(req.data); | ||
380 | + | ||
381 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
382 | + qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true); | ||
383 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
384 | + | ||
385 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
386 | + | ||
387 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
388 | + QVIRTIO_BLK_TIMEOUT_US); | ||
389 | + status = readb(req_addr + 528); | ||
390 | + g_assert_cmpint(status, ==, 0); | ||
391 | + | ||
392 | + data = g_malloc0(512); | ||
393 | + qtest_memread(qts, req_addr + 16, data, 512); | ||
394 | + g_assert_cmpstr(data, ==, "TEST"); | ||
395 | + g_free(data); | ||
396 | + | ||
397 | + guest_free(alloc, req_addr); | ||
398 | + | ||
399 | + if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) { | ||
400 | + struct virtio_blk_discard_write_zeroes dwz_hdr; | ||
401 | + void *expected; | ||
402 | + | ||
403 | + /* | ||
404 | + * WRITE_ZEROES request on the same sector as in the previous test, | ||
405 | + * where we wrote "TEST". | ||
406 | + */ | ||
407 | + req.type = VIRTIO_BLK_T_WRITE_ZEROES; | ||
408 | + req.data = (char *) &dwz_hdr; | ||
409 | + dwz_hdr.sector = 0; | ||
410 | + dwz_hdr.num_sectors = 1; | ||
411 | + dwz_hdr.flags = 0; | ||
412 | + | ||
413 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | ||
414 | + | ||
415 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | ||
416 | + | ||
417 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
418 | + qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true); | ||
419 | + qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, | ||
420 | + false); | ||
421 | + | ||
422 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
423 | + | ||
424 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
425 | + QVIRTIO_BLK_TIMEOUT_US); | ||
426 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | ||
427 | + g_assert_cmpint(status, ==, 0); | ||
428 | + | ||
429 | + guest_free(alloc, req_addr); | ||
430 | + | ||
431 | + /* Read request to check if the sector contains all zeroes */ | ||
432 | + req.type = VIRTIO_BLK_T_IN; | ||
433 | + req.ioprio = 1; | ||
434 | + req.sector = 0; | ||
435 | + req.data = g_malloc0(512); | ||
436 | + | ||
437 | + req_addr = virtio_blk_request(alloc, dev, &req, 512); | ||
438 | + | ||
439 | + g_free(req.data); | ||
440 | + | ||
441 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
442 | + qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true); | ||
443 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
444 | + | ||
445 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
446 | + | ||
447 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
448 | + QVIRTIO_BLK_TIMEOUT_US); | ||
449 | + status = readb(req_addr + 528); | ||
450 | + g_assert_cmpint(status, ==, 0); | ||
451 | + | ||
452 | + data = g_malloc(512); | ||
453 | + expected = g_malloc0(512); | ||
454 | + qtest_memread(qts, req_addr + 16, data, 512); | ||
455 | + g_assert_cmpmem(data, 512, expected, 512); | ||
456 | + g_free(expected); | ||
457 | + g_free(data); | ||
458 | + | ||
459 | + guest_free(alloc, req_addr); | ||
460 | + } | ||
461 | + | ||
462 | + if (features & (1u << VIRTIO_BLK_F_DISCARD)) { | ||
463 | + struct virtio_blk_discard_write_zeroes dwz_hdr; | ||
464 | + | ||
465 | + req.type = VIRTIO_BLK_T_DISCARD; | ||
466 | + req.data = (char *) &dwz_hdr; | ||
467 | + dwz_hdr.sector = 0; | ||
468 | + dwz_hdr.num_sectors = 1; | ||
469 | + dwz_hdr.flags = 0; | ||
470 | + | ||
471 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | ||
472 | + | ||
473 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | ||
474 | + | ||
475 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
476 | + qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true); | ||
477 | + qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), | ||
478 | + 1, true, false); | ||
479 | + | ||
480 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
481 | + | ||
482 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
483 | + QVIRTIO_BLK_TIMEOUT_US); | ||
484 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | ||
485 | + g_assert_cmpint(status, ==, 0); | ||
486 | + | ||
487 | + guest_free(alloc, req_addr); | ||
488 | + } | ||
489 | + | ||
490 | + if (features & (1u << VIRTIO_F_ANY_LAYOUT)) { | ||
491 | + /* Write and read with 2 descriptor layout */ | ||
492 | + /* Write request */ | ||
493 | + req.type = VIRTIO_BLK_T_OUT; | ||
494 | + req.ioprio = 1; | ||
495 | + req.sector = 1; | ||
496 | + req.data = g_malloc0(512); | ||
497 | + strcpy(req.data, "TEST"); | ||
498 | + | ||
499 | + req_addr = virtio_blk_request(alloc, dev, &req, 512); | ||
500 | + | ||
501 | + g_free(req.data); | ||
502 | + | ||
503 | + free_head = qvirtqueue_add(qts, vq, req_addr, 528, false, true); | ||
504 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
505 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
506 | + | ||
507 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
508 | + QVIRTIO_BLK_TIMEOUT_US); | ||
509 | + status = readb(req_addr + 528); | ||
510 | + g_assert_cmpint(status, ==, 0); | ||
511 | + | ||
512 | + guest_free(alloc, req_addr); | ||
513 | + | ||
514 | + /* Read request */ | ||
515 | + req.type = VIRTIO_BLK_T_IN; | ||
516 | + req.ioprio = 1; | ||
517 | + req.sector = 1; | ||
518 | + req.data = g_malloc0(512); | ||
519 | + | ||
520 | + req_addr = virtio_blk_request(alloc, dev, &req, 512); | ||
521 | + | ||
522 | + g_free(req.data); | ||
523 | + | ||
524 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
525 | + qvirtqueue_add(qts, vq, req_addr + 16, 513, true, false); | ||
526 | + | ||
527 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
528 | + | ||
529 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
530 | + QVIRTIO_BLK_TIMEOUT_US); | ||
531 | + status = readb(req_addr + 528); | ||
532 | + g_assert_cmpint(status, ==, 0); | ||
533 | + | ||
534 | + data = g_malloc0(512); | ||
535 | + qtest_memread(qts, req_addr + 16, data, 512); | ||
536 | + g_assert_cmpstr(data, ==, "TEST"); | ||
537 | + g_free(data); | ||
538 | + | ||
539 | + guest_free(alloc, req_addr); | ||
540 | + } | ||
541 | + | ||
542 | + return vq; | ||
543 | +} | ||
544 | + | ||
545 | +static void basic(void *obj, void *data, QGuestAllocator *t_alloc) | ||
546 | +{ | ||
547 | + QVhostUserBlk *blk_if = obj; | ||
548 | + QVirtQueue *vq; | ||
549 | + | ||
550 | + vq = test_basic(blk_if->vdev, t_alloc); | ||
551 | + qvirtqueue_cleanup(blk_if->vdev->bus, vq, t_alloc); | ||
552 | + | ||
553 | +} | ||
554 | + | ||
555 | +static void indirect(void *obj, void *u_data, QGuestAllocator *t_alloc) | ||
556 | +{ | ||
557 | + QVirtQueue *vq; | ||
558 | + QVhostUserBlk *blk_if = obj; | ||
559 | + QVirtioDevice *dev = blk_if->vdev; | ||
560 | + QVirtioBlkReq req; | ||
561 | + QVRingIndirectDesc *indirect; | ||
562 | + uint64_t req_addr; | ||
563 | + uint64_t capacity; | ||
564 | + uint64_t features; | ||
565 | + uint32_t free_head; | ||
566 | + uint8_t status; | ||
567 | + char *data; | ||
568 | + QTestState *qts = global_qtest; | ||
569 | + | ||
570 | + features = qvirtio_get_features(dev); | ||
571 | + g_assert_cmphex(features & (1u << VIRTIO_RING_F_INDIRECT_DESC), !=, 0); | ||
572 | + features = features & ~(QVIRTIO_F_BAD_FEATURE | | ||
573 | + (1u << VIRTIO_RING_F_EVENT_IDX) | | ||
574 | + (1u << VIRTIO_BLK_F_SCSI)); | ||
575 | + qvirtio_set_features(dev, features); | ||
576 | + | ||
577 | + capacity = qvirtio_config_readq(dev, 0); | ||
578 | + g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); | ||
579 | + | ||
580 | + vq = qvirtqueue_setup(dev, t_alloc, 0); | ||
581 | + qvirtio_set_driver_ok(dev); | ||
582 | + | ||
583 | + /* Write request */ | ||
584 | + req.type = VIRTIO_BLK_T_OUT; | ||
585 | + req.ioprio = 1; | ||
586 | + req.sector = 0; | ||
587 | + req.data = g_malloc0(512); | ||
588 | + strcpy(req.data, "TEST"); | ||
589 | + | ||
590 | + req_addr = virtio_blk_request(t_alloc, dev, &req, 512); | ||
591 | + | ||
592 | + g_free(req.data); | ||
593 | + | ||
594 | + indirect = qvring_indirect_desc_setup(qts, dev, t_alloc, 2); | ||
595 | + qvring_indirect_desc_add(dev, qts, indirect, req_addr, 528, false); | ||
596 | + qvring_indirect_desc_add(dev, qts, indirect, req_addr + 528, 1, true); | ||
597 | + free_head = qvirtqueue_add_indirect(qts, vq, indirect); | ||
598 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
599 | + | ||
600 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
601 | + QVIRTIO_BLK_TIMEOUT_US); | ||
602 | + status = readb(req_addr + 528); | ||
603 | + g_assert_cmpint(status, ==, 0); | ||
604 | + | ||
605 | + g_free(indirect); | ||
606 | + guest_free(t_alloc, req_addr); | ||
607 | + | ||
608 | + /* Read request */ | ||
609 | + req.type = VIRTIO_BLK_T_IN; | ||
610 | + req.ioprio = 1; | ||
611 | + req.sector = 0; | ||
612 | + req.data = g_malloc0(512); | ||
613 | + strcpy(req.data, "TEST"); | ||
614 | + | ||
615 | + req_addr = virtio_blk_request(t_alloc, dev, &req, 512); | ||
616 | + | ||
617 | + g_free(req.data); | ||
618 | + | ||
619 | + indirect = qvring_indirect_desc_setup(qts, dev, t_alloc, 2); | ||
620 | + qvring_indirect_desc_add(dev, qts, indirect, req_addr, 16, false); | ||
621 | + qvring_indirect_desc_add(dev, qts, indirect, req_addr + 16, 513, true); | ||
622 | + free_head = qvirtqueue_add_indirect(qts, vq, indirect); | ||
623 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
624 | + | ||
625 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
626 | + QVIRTIO_BLK_TIMEOUT_US); | ||
627 | + status = readb(req_addr + 528); | ||
628 | + g_assert_cmpint(status, ==, 0); | ||
629 | + | ||
630 | + data = g_malloc0(512); | ||
631 | + qtest_memread(qts, req_addr + 16, data, 512); | ||
632 | + g_assert_cmpstr(data, ==, "TEST"); | ||
633 | + g_free(data); | ||
634 | + | ||
635 | + g_free(indirect); | ||
636 | + guest_free(t_alloc, req_addr); | ||
637 | + qvirtqueue_cleanup(dev->bus, vq, t_alloc); | ||
638 | +} | ||
639 | + | ||
640 | +static void idx(void *obj, void *u_data, QGuestAllocator *t_alloc) | ||
641 | +{ | ||
642 | + QVirtQueue *vq; | ||
643 | + QVhostUserBlkPCI *blk = obj; | ||
644 | + QVirtioPCIDevice *pdev = &blk->pci_vdev; | ||
645 | + QVirtioDevice *dev = &pdev->vdev; | ||
646 | + QVirtioBlkReq req; | ||
647 | + uint64_t req_addr; | ||
648 | + uint64_t capacity; | ||
649 | + uint64_t features; | ||
650 | + uint32_t free_head; | ||
651 | + uint32_t write_head; | ||
652 | + uint32_t desc_idx; | ||
653 | + uint8_t status; | ||
654 | + char *data; | ||
655 | + QOSGraphObject *blk_object = obj; | ||
656 | + QPCIDevice *pci_dev = blk_object->get_driver(blk_object, "pci-device"); | ||
657 | + QTestState *qts = global_qtest; | ||
658 | + | ||
659 | + if (qpci_check_buggy_msi(pci_dev)) { | ||
660 | + return; | ||
661 | + } | ||
662 | + | ||
663 | + qpci_msix_enable(pdev->pdev); | ||
664 | + qvirtio_pci_set_msix_configuration_vector(pdev, t_alloc, 0); | ||
665 | + | ||
666 | + features = qvirtio_get_features(dev); | ||
667 | + features = features & ~(QVIRTIO_F_BAD_FEATURE | | ||
668 | + (1u << VIRTIO_RING_F_INDIRECT_DESC) | | ||
669 | + (1u << VIRTIO_F_NOTIFY_ON_EMPTY) | | ||
670 | + (1u << VIRTIO_BLK_F_SCSI)); | ||
671 | + qvirtio_set_features(dev, features); | ||
672 | + | ||
673 | + capacity = qvirtio_config_readq(dev, 0); | ||
674 | + g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); | ||
675 | + | ||
676 | + vq = qvirtqueue_setup(dev, t_alloc, 0); | ||
677 | + qvirtqueue_pci_msix_setup(pdev, (QVirtQueuePCI *)vq, t_alloc, 1); | ||
678 | + | ||
679 | + qvirtio_set_driver_ok(dev); | ||
680 | + | ||
681 | + /* | ||
682 | + * libvhost-user signals the call fd in VHOST_USER_SET_VRING_CALL, so make | ||
683 | + * sure to wait for the isr here; otherwise we race and confuse it later on. | ||
684 | + */ | ||
685 | + qvirtio_wait_queue_isr(qts, dev, vq, QVIRTIO_BLK_TIMEOUT_US); | ||
686 | + | ||
687 | + /* Write request */ | ||
688 | + req.type = VIRTIO_BLK_T_OUT; | ||
689 | + req.ioprio = 1; | ||
690 | + req.sector = 0; | ||
691 | + req.data = g_malloc0(512); | ||
692 | + strcpy(req.data, "TEST"); | ||
693 | + | ||
694 | + req_addr = virtio_blk_request(t_alloc, dev, &req, 512); | ||
695 | + | ||
696 | + g_free(req.data); | ||
697 | + | ||
698 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
699 | + qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true); | ||
700 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
701 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
702 | + | ||
703 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
704 | + QVIRTIO_BLK_TIMEOUT_US); | ||
705 | + | ||
706 | + /* Write request */ | ||
707 | + req.type = VIRTIO_BLK_T_OUT; | ||
708 | + req.ioprio = 1; | ||
709 | + req.sector = 1; | ||
710 | + req.data = g_malloc0(512); | ||
711 | + strcpy(req.data, "TEST"); | ||
712 | + | ||
713 | + req_addr = virtio_blk_request(t_alloc, dev, &req, 512); | ||
714 | + | ||
715 | + g_free(req.data); | ||
716 | + | ||
717 | + /* Notify after processing the third request */ | ||
718 | + qvirtqueue_set_used_event(qts, vq, 2); | ||
719 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
720 | + qvirtqueue_add(qts, vq, req_addr + 16, 512, false, true); | ||
721 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
722 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
723 | + write_head = free_head; | ||
724 | + | ||
725 | + /* No notification expected */ | ||
726 | + status = qvirtio_wait_status_byte_no_isr(qts, dev, | ||
727 | + vq, req_addr + 528, | ||
728 | + QVIRTIO_BLK_TIMEOUT_US); | ||
729 | + g_assert_cmpint(status, ==, 0); | ||
730 | + | ||
731 | + guest_free(t_alloc, req_addr); | ||
732 | + | ||
733 | + /* Read request */ | ||
734 | + req.type = VIRTIO_BLK_T_IN; | ||
735 | + req.ioprio = 1; | ||
736 | + req.sector = 1; | ||
737 | + req.data = g_malloc0(512); | ||
738 | + | ||
739 | + req_addr = virtio_blk_request(t_alloc, dev, &req, 512); | ||
740 | + | ||
741 | + g_free(req.data); | ||
742 | + | ||
743 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
744 | + qvirtqueue_add(qts, vq, req_addr + 16, 512, true, true); | ||
745 | + qvirtqueue_add(qts, vq, req_addr + 528, 1, true, false); | ||
746 | + | ||
747 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
748 | + | ||
749 | + /* We get just one notification for both requests */ | ||
750 | + qvirtio_wait_used_elem(qts, dev, vq, write_head, NULL, | ||
751 | + QVIRTIO_BLK_TIMEOUT_US); | ||
752 | + g_assert(qvirtqueue_get_buf(qts, vq, &desc_idx, NULL)); | ||
753 | + g_assert_cmpint(desc_idx, ==, free_head); | ||
754 | + | ||
755 | + status = readb(req_addr + 528); | ||
756 | + g_assert_cmpint(status, ==, 0); | ||
757 | + | ||
758 | + data = g_malloc0(512); | ||
759 | + qtest_memread(qts, req_addr + 16, data, 512); | ||
760 | + g_assert_cmpstr(data, ==, "TEST"); | ||
761 | + g_free(data); | ||
762 | + | ||
763 | + guest_free(t_alloc, req_addr); | ||
764 | + | ||
765 | + /* End test */ | ||
766 | + qpci_msix_disable(pdev->pdev); | ||
767 | + | ||
768 | + qvirtqueue_cleanup(dev->bus, vq, t_alloc); | ||
769 | +} | ||
770 | + | ||
771 | +static void pci_hotplug(void *obj, void *data, QGuestAllocator *t_alloc) | ||
772 | +{ | ||
773 | + QVirtioPCIDevice *dev1 = obj; | ||
774 | + QVirtioPCIDevice *dev; | ||
775 | + QTestState *qts = dev1->pdev->bus->qts; | ||
776 | + | ||
777 | + /* plug secondary disk */ | ||
778 | + qtest_qmp_device_add(qts, "vhost-user-blk-pci", "drv1", | ||
779 | + "{'addr': %s, 'chardev': 'char2'}", | ||
780 | + stringify(PCI_SLOT_HP) ".0"); | ||
781 | + | ||
782 | + dev = virtio_pci_new(dev1->pdev->bus, | ||
783 | + &(QPCIAddress) { .devfn = QPCI_DEVFN(PCI_SLOT_HP, 0) | ||
784 | + }); | ||
785 | + g_assert_nonnull(dev); | ||
786 | + g_assert_cmpint(dev->vdev.device_type, ==, VIRTIO_ID_BLOCK); | ||
787 | + qvirtio_pci_device_disable(dev); | ||
788 | + qos_object_destroy((QOSGraphObject *)dev); | ||
789 | + | ||
790 | + /* unplug secondary disk */ | ||
791 | + qpci_unplug_acpi_device_test(qts, "drv1", PCI_SLOT_HP); | ||
792 | +} | ||
793 | + | ||
794 | +/* | ||
795 | + * Check that setting the vring addr on a non-existent virtqueue does | ||
796 | + * not crash. | ||
797 | + */ | ||
798 | +static void test_nonexistent_virtqueue(void *obj, void *data, | ||
799 | + QGuestAllocator *t_alloc) | ||
800 | +{ | ||
801 | + QVhostUserBlkPCI *blk = obj; | ||
802 | + QVirtioPCIDevice *pdev = &blk->pci_vdev; | ||
803 | + QPCIBar bar0; | ||
804 | + QPCIDevice *dev; | ||
805 | + | ||
806 | + dev = qpci_device_find(pdev->pdev->bus, QPCI_DEVFN(4, 0)); | ||
807 | + g_assert(dev != NULL); | ||
808 | + qpci_device_enable(dev); | ||
809 | + | ||
810 | + bar0 = qpci_iomap(dev, 0, NULL); | ||
811 | + | ||
812 | + qpci_io_writeb(dev, bar0, VIRTIO_PCI_QUEUE_SEL, 2); | ||
813 | + qpci_io_writel(dev, bar0, VIRTIO_PCI_QUEUE_PFN, 1); | ||
814 | + | ||
815 | + g_free(dev); | ||
816 | +} | ||
817 | + | ||
818 | +static const char *qtest_qemu_storage_daemon_binary(void) | ||
819 | +{ | ||
820 | + const char *qemu_storage_daemon_bin; | ||
821 | + | ||
822 | + qemu_storage_daemon_bin = getenv("QTEST_QEMU_STORAGE_DAEMON_BINARY"); | ||
823 | + if (!qemu_storage_daemon_bin) { | ||
824 | + fprintf(stderr, "Environment variable " | ||
825 | + "QTEST_QEMU_STORAGE_DAEMON_BINARY required\n"); | ||
826 | + exit(0); | ||
827 | + } | ||
828 | + | ||
829 | + return qemu_storage_daemon_bin; | ||
830 | +} | ||
831 | + | ||
832 | +/* g_test_queue_destroy() cleanup function for files */ | ||
833 | +static void destroy_file(void *path) | ||
834 | +{ | ||
835 | + unlink(path); | ||
836 | + g_free(path); | ||
837 | + qos_invalidate_command_line(); | ||
838 | +} | ||
839 | + | ||
840 | +static char *drive_create(void) | ||
841 | +{ | ||
842 | + int fd, ret; | ||
843 | + /** vhost-user-blk won't recognize a drive located in /tmp */ | ||
844 | + char *t_path = g_strdup("qtest.XXXXXX"); | ||
845 | + | ||
846 | + /** Create a temporary raw image */ | ||
847 | + fd = mkstemp(t_path); | ||
848 | + g_assert_cmpint(fd, >=, 0); | ||
849 | + ret = ftruncate(fd, TEST_IMAGE_SIZE); | ||
850 | + g_assert_cmpint(ret, ==, 0); | ||
851 | + close(fd); | ||
852 | + | ||
853 | + g_test_queue_destroy(destroy_file, t_path); | ||
854 | + return t_path; | ||
855 | +} | ||
856 | + | ||
857 | +static char *create_listen_socket(int *fd) | ||
858 | +{ | ||
859 | + int tmp_fd; | ||
860 | + char *path; | ||
861 | + | ||
862 | + /* No race because our pid makes the path unique */ | ||
863 | + path = g_strdup_printf("/tmp/qtest-%d-sock.XXXXXX", getpid()); | ||
864 | + tmp_fd = mkstemp(path); | ||
865 | + g_assert_cmpint(tmp_fd, >=, 0); | ||
866 | + close(tmp_fd); | ||
867 | + unlink(path); | ||
868 | + | ||
869 | + *fd = qtest_socket_server(path); | ||
870 | + g_test_queue_destroy(destroy_file, path); | ||
871 | + return path; | ||
872 | +} | ||
873 | + | ||
874 | +/* | ||
875 | + * g_test_queue_destroy() and qtest_add_abrt_handler() cleanup function for | ||
876 | + * qemu-storage-daemon. | ||
877 | + */ | ||
878 | +static void quit_storage_daemon(void *data) | ||
879 | +{ | ||
880 | + QemuStorageDaemonState *qsd = data; | ||
881 | + int wstatus; | ||
882 | + pid_t pid; | ||
883 | + | ||
884 | + /* | ||
885 | + * If we were invoked as a g_test_queue_destroy() cleanup function we need | ||
886 | + * to remove the abrt handler to avoid being called again if the code below | ||
887 | + * aborts. Also, we must not leave the abrt handler installed after | ||
888 | + * cleanup. | ||
889 | + */ | ||
890 | + qtest_remove_abrt_handler(data); | ||
891 | + | ||
892 | + /* Before quitting storage-daemon, quit qemu to avoid dubious messages */ | ||
893 | + qtest_kill_qemu(global_qtest); | ||
894 | + | ||
895 | + kill(qsd->pid, SIGTERM); | ||
896 | + pid = waitpid(qsd->pid, &wstatus, 0); | ||
897 | + g_assert_cmpint(pid, ==, qsd->pid); | ||
898 | + if (!WIFEXITED(wstatus)) { | ||
899 | + fprintf(stderr, "%s: expected qemu-storage-daemon to exit\n", | ||
900 | + __func__); | ||
901 | + abort(); | ||
902 | + } | ||
903 | + if (WEXITSTATUS(wstatus) != 0) { | ||
904 | + fprintf(stderr, "%s: expected qemu-storage-daemon to exit " | ||
905 | + "successfully, got %d\n", | ||
906 | + __func__, WEXITSTATUS(wstatus)); | ||
907 | + abort(); | ||
908 | + } | ||
909 | + | ||
910 | + g_free(data); | ||
911 | +} | ||
912 | + | ||
913 | +static void start_vhost_user_blk(GString *cmd_line, int vus_instances) | ||
914 | +{ | ||
915 | + const char *vhost_user_blk_bin = qtest_qemu_storage_daemon_binary(); | ||
916 | + int i; | ||
917 | + gchar *img_path; | ||
918 | + GString *storage_daemon_command = g_string_new(NULL); | ||
919 | + QemuStorageDaemonState *qsd; | ||
920 | + | ||
921 | + g_string_append_printf(storage_daemon_command, | ||
922 | + "exec %s ", | ||
923 | + vhost_user_blk_bin); | ||
924 | + | ||
925 | + g_string_append_printf(cmd_line, | ||
926 | + " -object memory-backend-memfd,id=mem,size=256M,share=on " | ||
927 | + " -M memory-backend=mem -m 256M "); | ||
928 | + | ||
929 | + for (i = 0; i < vus_instances; i++) { | ||
930 | + int fd; | ||
931 | + char *sock_path = create_listen_socket(&fd); | ||
932 | + | ||
933 | + /* create image file */ | ||
934 | + img_path = drive_create(); | ||
935 | + g_string_append_printf(storage_daemon_command, | ||
936 | + "--blockdev driver=file,node-name=disk%d,filename=%s " | ||
937 | + "--export type=vhost-user-blk,id=disk%d,addr.type=unix,addr.path=%s," | ||
938 | + "node-name=disk%i,writable=on ", | ||
939 | + i, img_path, i, sock_path, i); | ||
940 | + | ||
941 | + g_string_append_printf(cmd_line, "-chardev socket,id=char%d,path=%s ", | ||
942 | + i + 1, sock_path); | ||
943 | + } | ||
944 | + | ||
945 | + g_test_message("starting vhost-user backend: %s", | ||
946 | + storage_daemon_command->str); | ||
947 | + pid_t pid = fork(); | ||
948 | + if (pid == 0) { | ||
949 | + /* | ||
950 | + * Close standard file descriptors so tap-driver.pl pipe detects when | ||
951 | + * our parent terminates. | ||
952 | + */ | ||
953 | + close(0); | ||
954 | + close(1); | ||
955 | + open("/dev/null", O_RDONLY); | ||
956 | + open("/dev/null", O_WRONLY); | ||
957 | + | ||
958 | + execlp("/bin/sh", "sh", "-c", storage_daemon_command->str, NULL); | ||
959 | + exit(1); | ||
960 | + } | ||
961 | + g_string_free(storage_daemon_command, true); | ||
962 | + | ||
963 | + qsd = g_new(QemuStorageDaemonState, 1); | ||
964 | + qsd->pid = pid; | ||
965 | + | ||
966 | + /* Make sure qemu-storage-daemon is stopped */ | ||
967 | + qtest_add_abrt_handler(quit_storage_daemon, qsd); | ||
968 | + g_test_queue_destroy(quit_storage_daemon, qsd); | ||
969 | +} | ||
970 | + | ||
971 | +static void *vhost_user_blk_test_setup(GString *cmd_line, void *arg) | ||
972 | +{ | ||
973 | + start_vhost_user_blk(cmd_line, 1); | ||
974 | + return arg; | ||
975 | +} | ||
976 | + | ||
977 | +/* | ||
978 | + * Setup for hotplug. | ||
979 | + * | ||
980 | + * Since the vhost-user server only serves one vhost-user client at a | ||
981 | + * time, a second export (and a second chardev) is started for the | ||
982 | + * hotplugged device. | ||
983 | + */ | ||
984 | +static void *vhost_user_blk_hotplug_test_setup(GString *cmd_line, void *arg) | ||
985 | +{ | ||
986 | + /* "-chardev socket,id=char2" is used for pci_hotplug */ | ||
987 | + start_vhost_user_blk(cmd_line, 2); | ||
988 | + return arg; | ||
989 | +} | ||
990 | + | ||
991 | +static void register_vhost_user_blk_test(void) | ||
992 | +{ | ||
993 | + QOSGraphTestOptions opts = { | ||
994 | + .before = vhost_user_blk_test_setup, | ||
995 | + }; | ||
996 | + | ||
997 | + /* | ||
998 | + * tests for vhost-user-blk and vhost-user-blk-pci | ||
999 | + * The tests are borrowed from tests/virtio-blk-test.c, but the tests | ||
1000 | + * that rely on block_resize don't work for vhost-user-blk: the | ||
1001 | + * vhost-user-blk device has no -drive to resize, so the following | ||
1002 | + * tests are omitted: | ||
1003 | + * - config | ||
1004 | + * - resize | ||
1005 | + */ | ||
1006 | + qos_add_test("basic", "vhost-user-blk", basic, &opts); | ||
1007 | + qos_add_test("indirect", "vhost-user-blk", indirect, &opts); | ||
1008 | + qos_add_test("idx", "vhost-user-blk-pci", idx, &opts); | ||
1009 | + qos_add_test("nxvirtq", "vhost-user-blk-pci", | ||
1010 | + test_nonexistent_virtqueue, &opts); | ||
1011 | + | ||
1012 | + opts.before = vhost_user_blk_hotplug_test_setup; | ||
1013 | + qos_add_test("hotplug", "vhost-user-blk-pci", pci_hotplug, &opts); | ||
1014 | +} | ||
1015 | + | ||
1016 | +libqos_init(register_vhost_user_blk_test); | ||
1017 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
1018 | index XXXXXXX..XXXXXXX 100644 | ||
1019 | --- a/MAINTAINERS | ||
1020 | +++ b/MAINTAINERS | ||
1021 | @@ -XXX,XX +XXX,XX @@ F: block/export/vhost-user-blk-server.c | ||
1022 | F: block/export/vhost-user-blk-server.h | ||
1023 | F: include/qemu/vhost-user-server.h | ||
1024 | F: tests/qtest/libqos/vhost-user-blk.c | ||
1025 | +F: tests/qtest/libqos/vhost-user-blk.h | ||
1026 | +F: tests/qtest/vhost-user-blk-test.c | ||
1027 | F: util/vhost-user-server.c | ||
1028 | |||
1029 | FUSE block device exports | ||
1030 | diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build | ||
1031 | index XXXXXXX..XXXXXXX 100644 | ||
1032 | --- a/tests/qtest/libqos/meson.build | ||
1033 | +++ b/tests/qtest/libqos/meson.build | ||
1034 | @@ -XXX,XX +XXX,XX @@ libqos_srcs = files('../libqtest.c', | ||
1035 | 'virtio-9p.c', | ||
1036 | 'virtio-balloon.c', | ||
1037 | 'virtio-blk.c', | ||
1038 | + 'vhost-user-blk.c', | ||
1039 | 'virtio-mmio.c', | ||
1040 | 'virtio-net.c', | ||
1041 | 'virtio-pci.c', | ||
1042 | diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build | ||
1043 | index XXXXXXX..XXXXXXX 100644 | ||
1044 | --- a/tests/qtest/meson.build | ||
1045 | +++ b/tests/qtest/meson.build | ||
1046 | @@ -XXX,XX +XXX,XX @@ if have_virtfs | ||
1047 | qos_test_ss.add(files('virtio-9p-test.c')) | ||
1048 | endif | ||
1049 | qos_test_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user-test.c')) | ||
1050 | +if have_vhost_user_blk_server | ||
1051 | + qos_test_ss.add(files('vhost-user-blk-test.c')) | ||
1052 | +endif | ||
1053 | |||
1054 | tpmemu_files = ['tpm-emu.c', 'tpm-util.c', 'tpm-tests.c'] | ||
1055 | |||
1056 | @@ -XXX,XX +XXX,XX @@ foreach dir : target_dirs | ||
1057 | endif | ||
1058 | qtest_env.set('G_TEST_DBUS_DAEMON', meson.source_root() / 'tests/dbus-vmstate-daemon.sh') | ||
1059 | qtest_env.set('QTEST_QEMU_BINARY', './qemu-system-' + target_base) | ||
1060 | + qtest_env.set('QTEST_QEMU_STORAGE_DAEMON_BINARY', './storage-daemon/qemu-storage-daemon') | ||
1061 | |||
1062 | foreach test : target_qtests | ||
1063 | # Executables are shared across targets, declare them only the first time we | ||
1064 | -- | 171 | -- |
1065 | 2.30.2 | 172 | 2.48.1 |
1066 | |||
1067 | diff view generated by jsdifflib |
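
For reference, the setup that the qtest above assembles in C amounts to a qemu-storage-daemon vhost-user-blk export consumed by a QEMU guest through a unix socket chardev. The Python sketch below shows the same shape outside the qtest framework; it is not part of the patch, and the binary paths, file names, IDs and image size are placeholders rather than values taken from the test.

    #!/usr/bin/env python3
    # Rough sketch of the vhost-user-blk wiring exercised by the test above.
    import subprocess

    img = 'vhost-user-blk-sketch.img'       # like the test, not under /tmp
    sock = 'vhost-user-blk-sketch.sock'

    with open(img, 'wb') as f:
        f.truncate(64 * 1024 * 1024)        # any size is fine for the sketch

    # Backend: qemu-storage-daemon exports the raw image via vhost-user-blk
    qsd = subprocess.Popen([
        './storage-daemon/qemu-storage-daemon',
        '--blockdev', f'driver=file,node-name=disk0,filename={img}',
        '--export', 'type=vhost-user-blk,id=disk0,addr.type=unix,'
                    f'addr.path={sock},node-name=disk0,writable=on',
    ])

    # Frontend: vhost-user requires guest RAM that the backend can map, hence
    # the memfd memory backend, mirroring the test's -object/-M arguments.
    qemu = subprocess.Popen([
        './qemu-system-x86_64',
        '-object', 'memory-backend-memfd,id=mem,size=256M,share=on',
        '-M', 'memory-backend=mem', '-m', '256M',
        '-chardev', f'socket,id=char1,path={sock}',
        '-device', 'vhost-user-blk-pci,chardev=char1',
    ])
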
New patch | |||
---|---|---|---|
1 | Device models have a relatively complex way to set up their block | ||
2 | backends, in which blk_attach_dev() sets blk->disable_perm = true. | ||
3 | We want to support inactive images in exports, too, so that | ||
4 | qemu-storage-daemon can be used with migration. Because they don't use | ||
5 | blk_attach_dev(), they need another way to set this flag. The most | ||
6 | convenient way is to set it automatically when an inactive node is attached | ||
7 | to a BlockBackend that can be inactivated. | ||
1 | 8 | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
11 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | Message-ID: <20250204211407.381505-10-kwolf@redhat.com> | ||
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
15 | --- | ||
16 | block/block-backend.c | 14 ++++++++++++-- | ||
17 | 1 file changed, 12 insertions(+), 2 deletions(-) | ||
18 | |||
19 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/block/block-backend.c | ||
22 | +++ b/block/block-backend.c | ||
23 | @@ -XXX,XX +XXX,XX @@ void blk_remove_bs(BlockBackend *blk) | ||
24 | int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) | ||
25 | { | ||
26 | ThrottleGroupMember *tgm = &blk->public.throttle_group_member; | ||
27 | + uint64_t perm, shared_perm; | ||
28 | |||
29 | GLOBAL_STATE_CODE(); | ||
30 | bdrv_ref(bs); | ||
31 | bdrv_graph_wrlock(); | ||
32 | + | ||
33 | + if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) { | ||
34 | + blk->disable_perm = true; | ||
35 | + perm = 0; | ||
36 | + shared_perm = BLK_PERM_ALL; | ||
37 | + } else { | ||
38 | + perm = blk->perm; | ||
39 | + shared_perm = blk->shared_perm; | ||
40 | + } | ||
41 | + | ||
42 | blk->root = bdrv_root_attach_child(bs, "root", &child_root, | ||
43 | BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, | ||
44 | - blk->perm, blk->shared_perm, | ||
45 | - blk, errp); | ||
46 | + perm, shared_perm, blk, errp); | ||
47 | bdrv_graph_wrunlock(); | ||
48 | if (blk->root == NULL) { | ||
49 | return -EPERM; | ||
50 | -- | ||
51 | 2.48.1 | diff view generated by jsdifflib |
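
The commit message above spells out why inactive images need to work with exports: qemu-storage-daemon should be able to serve an image that is not yet (or no longer) active during a migration with shared storage. As a non-authoritative sketch of how a management tool might drive the destination side with the options added in this series (active on blockdev-add, allow-inactive on block-export-add, and blockdev-set-active), consider the following; qmp_send is a placeholder for a real QMP client, and the file name and node names are invented.

    # Illustrative QMP flow only; qmp_send stands in for a real QMP client.
    def qmp_send(cmd, **args):
        print({'execute': cmd, 'arguments': args})    # placeholder transport

    # Open the image on the destination without activating it yet
    qmp_send('blockdev-add', driver='file', filename='/images/disk.img',
             **{'node-name': 'disk-file', 'active': False})

    # The export may already exist while the node is still inactive
    qmp_send('block-export-add', type='nbd', id='exp0', writable=True,
             **{'node-name': 'disk-file', 'allow-inactive': True})

    # Once the source has inactivated its side, take over write access
    qmp_send('blockdev-set-active', active=True,
             **{'node-name': 'disk-file'})
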
New patch | |||
---|---|---|---|
1 | Currently, block exports can't handle inactive images correctly. | ||
2 | Incoming write requests would run into assertion failures. Make sure | ||
3 | that we return an error when creating an export fails to activate the | ||
4 | image. | ||
1 | 5 | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
7 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
8 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Message-ID: <20250204211407.381505-11-kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | --- | ||
13 | block/export/export.c | 6 +++++- | ||
14 | 1 file changed, 5 insertions(+), 1 deletion(-) | ||
15 | |||
16 | diff --git a/block/export/export.c b/block/export/export.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/export/export.c | ||
19 | +++ b/block/export/export.c | ||
20 | @@ -XXX,XX +XXX,XX @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) | ||
21 | * ctx was acquired in the caller. | ||
22 | */ | ||
23 | bdrv_graph_rdlock_main_loop(); | ||
24 | - bdrv_activate(bs, NULL); | ||
25 | + ret = bdrv_activate(bs, errp); | ||
26 | + if (ret < 0) { | ||
27 | + bdrv_graph_rdunlock_main_loop(); | ||
28 | + goto fail; | ||
29 | + } | ||
30 | bdrv_graph_rdunlock_main_loop(); | ||
31 | |||
32 | perm = BLK_PERM_CONSISTENT_READ; | ||
33 | -- | ||
34 | 2.48.1 | diff view generated by jsdifflib |
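
With the change above, a failed activation is no longer ignored but reported to the caller of blk_exp_add(), so the QMP client sees block-export-add fail instead of getting an export on an image that was never activated. A minimal sketch of the caller-visible effect, with send_qmp as a stand-in for a real QMP client and invented node names:

    # Sketch only; send_qmp is a placeholder for a real QMP round trip.
    def send_qmp(cmd, **args):
        return {'return': {}}    # a real client would return the QMP response

    resp = send_qmp('block-export-add', type='nbd', id='exp0', writable=True,
                    **{'node-name': 'disk-fmt'})
    if 'error' in resp:
        # e.g. activation failed because another process still holds the
        # image lock; previously this went unnoticed at export creation time.
        print('export creation failed:', resp['error']['desc'])
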
1 | The error path needs to call tran_finalize(), too. | 1 | So far the assumption has always been that if we try to inactivate a |
---|---|---|---|
2 | node, it is already idle. This doesn't hold true any more if we allow | ||
3 | inactivating exported nodes because we can't know when new external | ||
4 | requests come in. | ||
2 | 5 | ||
3 | Fixes: CID 1452773 | 6 | Drain the node around setting BDRV_O_INACTIVE so that requests can't |
4 | Fixes: 548a74c0dbc858edd1a7ee3045b5f2fe710bd8b1 | 7 | start operating on an active node and then in the middle it suddenly |
8 | becomes inactive. With this change, it's enough for exports to check | ||
9 | for new requests that they operate on an active node (or, like reads, | ||
10 | are allowed even on an inactive node). | ||
11 | |||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
6 | Message-Id: <20210503110555.24001-2-kwolf@redhat.com> | 13 | Acked-by: Fabiano Rosas <farosas@suse.de> |
7 | Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> | 14 | Message-ID: <20250204211407.381505-12-kwolf@redhat.com> |
8 | Reviewed-by: Max Reitz <mreitz@redhat.com> | 15 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
16 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 17 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 18 | --- |
11 | block.c | 7 ++++--- | 19 | block.c | 2 ++ |
12 | 1 file changed, 4 insertions(+), 3 deletions(-) | 20 | 1 file changed, 2 insertions(+) |
13 | 21 | ||
14 | diff --git a/block.c b/block.c | 22 | diff --git a/block.c b/block.c |
15 | index XXXXXXX..XXXXXXX 100644 | 23 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block.c | 24 | --- a/block.c |
17 | +++ b/block.c | 25 | +++ b/block.c |
18 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, | 26 | @@ -XXX,XX +XXX,XX @@ bdrv_inactivate_recurse(BlockDriverState *bs, bool top_level) |
19 | child_role, perm, shared_perm, opaque, | 27 | return -EPERM; |
20 | &child, tran, errp); | ||
21 | if (ret < 0) { | ||
22 | - bdrv_unref(child_bs); | ||
23 | - return NULL; | ||
24 | + assert(child == NULL); | ||
25 | + goto out; | ||
26 | } | 28 | } |
27 | 29 | ||
28 | ret = bdrv_refresh_perms(child_bs, errp); | 30 | + bdrv_drained_begin(bs); |
29 | - tran_finalize(tran, ret); | 31 | bs->open_flags |= BDRV_O_INACTIVE; |
30 | 32 | + bdrv_drained_end(bs); | |
31 | +out: | 33 | |
32 | + tran_finalize(tran, ret); | 34 | /* |
33 | bdrv_unref(child_bs); | 35 | * Update permissions, they may differ for inactive nodes. |
34 | return child; | ||
35 | } | ||
36 | -- | 36 | -- |
37 | 2.30.2 | 37 | 2.48.1 |
38 | |||
39 | diff view generated by jsdifflib |
1 | We have to set errp before jumping to virtio_err, otherwise the caller | 1 | Add an option in BlockExportOptions to allow creating an export on an |
---|---|---|---|
2 | (virtio_device_realize()) will take this as success and crash when it | 2 | inactive node without activating the node. This mode needs to be |
3 | later tries to access things that we've already freed in the error path. | 3 | explicitly supported by the export type (so that it doesn't perform any |
4 | operations that are forbidden for inactive nodes), so this patch alone | ||
5 | doesn't allow this option to be successfully used yet. | ||
4 | 6 | ||
5 | Fixes: 77542d431491788d1e8e79d93ce10172ef207775 | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
7 | Message-Id: <20210429171316.162022-2-kwolf@redhat.com> | 8 | Acked-by: Fabiano Rosas <farosas@suse.de> |
8 | Reviewed-by: Michael S. Tsirkin <mst@redhat.com> | ||
9 | Reviewed-by: Eric Blake <eblake@redhat.com> | 9 | Reviewed-by: Eric Blake <eblake@redhat.com> |
10 | Acked-by: Raphael Norwitz <raphael.norwitz@nutanix.com> | 10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | Message-ID: <20250204211407.381505-13-kwolf@redhat.com> | ||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
12 | --- | 13 | --- |
13 | hw/block/vhost-user-blk.c | 4 +--- | 14 | qapi/block-export.json | 10 +++++++++- |
14 | 1 file changed, 1 insertion(+), 3 deletions(-) | 15 | include/block/export.h | 3 +++ |
16 | block/export/export.c | 31 +++++++++++++++++++++---------- | ||
17 | 3 files changed, 33 insertions(+), 11 deletions(-) | ||
15 | 18 | ||
16 | diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c | 19 | diff --git a/qapi/block-export.json b/qapi/block-export.json |
17 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/hw/block/vhost-user-blk.c | 21 | --- a/qapi/block-export.json |
19 | +++ b/hw/block/vhost-user-blk.c | 22 | +++ b/qapi/block-export.json |
20 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) | 23 | @@ -XXX,XX +XXX,XX @@ |
24 | # cannot be moved to the iothread. The default is false. | ||
25 | # (since: 5.2) | ||
26 | # | ||
27 | +# @allow-inactive: If true, the export allows the exported node to be inactive. | ||
28 | +# If it is created for an inactive block node, the node remains inactive. If | ||
29 | +# the export type doesn't support running on an inactive node, an error is | ||
30 | +# returned. If false, inactive block nodes are automatically activated before | ||
31 | +# creating the export and trying to inactivate them later fails. | ||
32 | +# (since: 10.0; default: false) | ||
33 | +# | ||
34 | # Since: 4.2 | ||
35 | ## | ||
36 | { 'union': 'BlockExportOptions', | ||
37 | @@ -XXX,XX +XXX,XX @@ | ||
38 | '*iothread': 'str', | ||
39 | 'node-name': 'str', | ||
40 | '*writable': 'bool', | ||
41 | - '*writethrough': 'bool' }, | ||
42 | + '*writethrough': 'bool', | ||
43 | + '*allow-inactive': 'bool' }, | ||
44 | 'discriminator': 'type', | ||
45 | 'data': { | ||
46 | 'nbd': 'BlockExportOptionsNbd', | ||
47 | diff --git a/include/block/export.h b/include/block/export.h | ||
48 | index XXXXXXX..XXXXXXX 100644 | ||
49 | --- a/include/block/export.h | ||
50 | +++ b/include/block/export.h | ||
51 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockExportDriver { | ||
52 | */ | ||
53 | size_t instance_size; | ||
54 | |||
55 | + /* True if the export type supports running on an inactive node */ | ||
56 | + bool supports_inactive; | ||
57 | + | ||
58 | /* Creates and starts a new block export */ | ||
59 | int (*create)(BlockExport *, BlockExportOptions *, Error **); | ||
60 | |||
61 | diff --git a/block/export/export.c b/block/export/export.c | ||
62 | index XXXXXXX..XXXXXXX 100644 | ||
63 | --- a/block/export/export.c | ||
64 | +++ b/block/export/export.c | ||
65 | @@ -XXX,XX +XXX,XX @@ static const BlockExportDriver *blk_exp_find_driver(BlockExportType type) | ||
66 | BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) | ||
21 | { | 67 | { |
22 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); | 68 | bool fixed_iothread = export->has_fixed_iothread && export->fixed_iothread; |
23 | VHostUserBlk *s = VHOST_USER_BLK(vdev); | 69 | + bool allow_inactive = export->has_allow_inactive && export->allow_inactive; |
24 | - Error *err = NULL; | 70 | const BlockExportDriver *drv; |
25 | int i, ret; | 71 | BlockExport *exp = NULL; |
26 | 72 | BlockDriverState *bs; | |
27 | if (!s->chardev.chr) { | 73 | @@ -XXX,XX +XXX,XX @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) |
28 | @@ -XXX,XX +XXX,XX @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) | 74 | } |
29 | NULL, true); | ||
30 | |||
31 | reconnect: | ||
32 | - if (qemu_chr_fe_wait_connected(&s->chardev, &err) < 0) { | ||
33 | - error_report_err(err); | ||
34 | + if (qemu_chr_fe_wait_connected(&s->chardev, errp) < 0) { | ||
35 | goto virtio_err; | ||
36 | } | 75 | } |
37 | 76 | ||
77 | - /* | ||
78 | - * Block exports are used for non-shared storage migration. Make sure | ||
79 | - * that BDRV_O_INACTIVE is cleared and the image is ready for write | ||
80 | - * access since the export could be available before migration handover. | ||
81 | - * ctx was acquired in the caller. | ||
82 | - */ | ||
83 | bdrv_graph_rdlock_main_loop(); | ||
84 | - ret = bdrv_activate(bs, errp); | ||
85 | - if (ret < 0) { | ||
86 | - bdrv_graph_rdunlock_main_loop(); | ||
87 | - goto fail; | ||
88 | + if (allow_inactive) { | ||
89 | + if (!drv->supports_inactive) { | ||
90 | + error_setg(errp, "Export type does not support inactive exports"); | ||
91 | + bdrv_graph_rdunlock_main_loop(); | ||
92 | + goto fail; | ||
93 | + } | ||
94 | + } else { | ||
95 | + /* | ||
96 | + * Block exports are used for non-shared storage migration. Make sure | ||
97 | + * that BDRV_O_INACTIVE is cleared and the image is ready for write | ||
98 | + * access since the export could be available before migration handover. | ||
99 | + */ | ||
100 | + ret = bdrv_activate(bs, errp); | ||
101 | + if (ret < 0) { | ||
102 | + bdrv_graph_rdunlock_main_loop(); | ||
103 | + goto fail; | ||
104 | + } | ||
105 | } | ||
106 | bdrv_graph_rdunlock_main_loop(); | ||
107 | |||
108 | @@ -XXX,XX +XXX,XX @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) | ||
109 | if (!fixed_iothread) { | ||
110 | blk_set_allow_aio_context_change(blk, true); | ||
111 | } | ||
112 | + if (allow_inactive) { | ||
113 | + blk_set_force_allow_inactivate(blk); | ||
114 | + } | ||
115 | |||
116 | ret = blk_insert_bs(blk, bs, errp); | ||
117 | if (ret < 0) { | ||
38 | -- | 118 | -- |
39 | 2.30.2 | 119 | 2.48.1 |
40 | |||
41 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | In order to support running an NBD export on inactive nodes, we must | ||
2 | make sure to return errors for any operations that aren't allowed on | ||
3 | inactive nodes. Reads are the only operation we know we need for | ||
4 | inactive images, so to err on the side of caution, return errors for | ||
5 | everything else, even if some operations could possibly be okay. | ||
1 | 6 | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
8 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
9 | Message-ID: <20250204211407.381505-14-kwolf@redhat.com> | ||
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
11 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | --- | ||
14 | nbd/server.c | 17 +++++++++++++++++ | ||
15 | 1 file changed, 17 insertions(+) | ||
16 | |||
17 | diff --git a/nbd/server.c b/nbd/server.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/nbd/server.c | ||
20 | +++ b/nbd/server.c | ||
21 | @@ -XXX,XX +XXX,XX @@ static void nbd_export_delete(BlockExport *blk_exp) | ||
22 | const BlockExportDriver blk_exp_nbd = { | ||
23 | .type = BLOCK_EXPORT_TYPE_NBD, | ||
24 | .instance_size = sizeof(NBDExport), | ||
25 | + .supports_inactive = true, | ||
26 | .create = nbd_export_create, | ||
27 | .delete = nbd_export_delete, | ||
28 | .request_shutdown = nbd_export_request_shutdown, | ||
29 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int nbd_handle_request(NBDClient *client, | ||
30 | NBDExport *exp = client->exp; | ||
31 | char *msg; | ||
32 | size_t i; | ||
33 | + bool inactive; | ||
34 | + | ||
35 | + WITH_GRAPH_RDLOCK_GUARD() { | ||
36 | + inactive = bdrv_is_inactive(blk_bs(exp->common.blk)); | ||
37 | + if (inactive) { | ||
38 | + switch (request->type) { | ||
39 | + case NBD_CMD_READ: | ||
40 | + /* These commands are allowed on inactive nodes */ | ||
41 | + break; | ||
42 | + default: | ||
43 | + /* Return an error for the rest */ | ||
44 | + return nbd_send_generic_reply(client, request, -EPERM, | ||
45 | + "export is inactive", errp); | ||
46 | + } | ||
47 | + } | ||
48 | + } | ||
49 | |||
50 | switch (request->type) { | ||
51 | case NBD_CMD_CACHE: | ||
52 | -- | ||
53 | 2.48.1 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | The open-coded form of this filter has been copied into enough tests | ||
2 | that it's better to move it into iotests.py. | ||
1 | 3 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
6 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
7 | Message-ID: <20250204211407.381505-15-kwolf@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | tests/qemu-iotests/iotests.py | 4 ++++ | ||
12 | tests/qemu-iotests/041 | 4 +--- | ||
13 | tests/qemu-iotests/165 | 4 +--- | ||
14 | tests/qemu-iotests/tests/copy-before-write | 3 +-- | ||
15 | tests/qemu-iotests/tests/migrate-bitmaps-test | 7 +++---- | ||
16 | 5 files changed, 10 insertions(+), 12 deletions(-) | ||
17 | |||
18 | diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/tests/qemu-iotests/iotests.py | ||
21 | +++ b/tests/qemu-iotests/iotests.py | ||
22 | @@ -XXX,XX +XXX,XX @@ def _filter(_key, value): | ||
23 | def filter_nbd_exports(output: str) -> str: | ||
24 | return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output) | ||
25 | |||
26 | +def filter_qtest(output: str) -> str: | ||
27 | + output = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', output) | ||
28 | + output = re.sub(r'\n?\[I \+\d+\.\d+\] CLOSED\n?$', '', output) | ||
29 | + return output | ||
30 | |||
31 | Msg = TypeVar('Msg', Dict[str, Any], List[Any], str) | ||
32 | |||
33 | diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041 | ||
34 | index XXXXXXX..XXXXXXX 100755 | ||
35 | --- a/tests/qemu-iotests/041 | ||
36 | +++ b/tests/qemu-iotests/041 | ||
37 | @@ -XXX,XX +XXX,XX @@ class TestRepairQuorum(iotests.QMPTestCase): | ||
38 | |||
39 | # Check the full error message now | ||
40 | self.vm.shutdown() | ||
41 | - log = self.vm.get_log() | ||
42 | - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) | ||
43 | + log = iotests.filter_qtest(self.vm.get_log()) | ||
44 | log = re.sub(r'^Formatting.*\n', '', log) | ||
45 | - log = re.sub(r'\n\[I \+\d+\.\d+\] CLOSED\n?$', '', log) | ||
46 | log = re.sub(r'^%s: ' % os.path.basename(iotests.qemu_prog), '', log) | ||
47 | |||
48 | self.assertEqual(log, | ||
49 | diff --git a/tests/qemu-iotests/165 b/tests/qemu-iotests/165 | ||
50 | index XXXXXXX..XXXXXXX 100755 | ||
51 | --- a/tests/qemu-iotests/165 | ||
52 | +++ b/tests/qemu-iotests/165 | ||
53 | @@ -XXX,XX +XXX,XX @@ class TestPersistentDirtyBitmap(iotests.QMPTestCase): | ||
54 | self.vm.shutdown() | ||
55 | |||
56 | #catch 'Persistent bitmaps are lost' possible error | ||
57 | - log = self.vm.get_log() | ||
58 | - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) | ||
59 | - log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) | ||
60 | + log = iotests.filter_qtest(self.vm.get_log()) | ||
61 | if log: | ||
62 | print(log) | ||
63 | |||
64 | diff --git a/tests/qemu-iotests/tests/copy-before-write b/tests/qemu-iotests/tests/copy-before-write | ||
65 | index XXXXXXX..XXXXXXX 100755 | ||
66 | --- a/tests/qemu-iotests/tests/copy-before-write | ||
67 | +++ b/tests/qemu-iotests/tests/copy-before-write | ||
68 | @@ -XXX,XX +XXX,XX @@ class TestCbwError(iotests.QMPTestCase): | ||
69 | |||
70 | self.vm.shutdown() | ||
71 | log = self.vm.get_log() | ||
72 | - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) | ||
73 | - log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) | ||
74 | + log = iotests.filter_qtest(log) | ||
75 | log = iotests.filter_qemu_io(log) | ||
76 | return log | ||
77 | |||
78 | diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test | ||
79 | index XXXXXXX..XXXXXXX 100755 | ||
80 | --- a/tests/qemu-iotests/tests/migrate-bitmaps-test | ||
81 | +++ b/tests/qemu-iotests/tests/migrate-bitmaps-test | ||
82 | @@ -XXX,XX +XXX,XX @@ class TestDirtyBitmapMigration(iotests.QMPTestCase): | ||
83 | |||
84 | # catch 'Could not reopen qcow2 layer: Bitmap already exists' | ||
85 | # possible error | ||
86 | - log = self.vm_a.get_log() | ||
87 | - log = re.sub(r'^\[I \d+\.\d+\] OPENED\n', '', log) | ||
88 | - log = re.sub(r'^(wrote .* bytes at offset .*\n.*KiB.*ops.*sec.*\n){3}', | ||
89 | + log = iotests.filter_qtest(self.vm_a.get_log()) | ||
90 | + log = re.sub(r'^(wrote .* bytes at offset .*\n' | ||
91 | + r'.*KiB.*ops.*sec.*\n?){3}', | ||
92 | '', log) | ||
93 | - log = re.sub(r'\[I \+\d+\.\d+\] CLOSED\n?$', '', log) | ||
94 | self.assertEqual(log, '') | ||
95 | |||
96 | # test that bitmap is still persistent | ||
97 | -- | ||
98 | 2.48.1 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | Test that it's possible to migrate a VM that uses an image on shared | ||
2 | storage through qemu-storage-daemon. | ||
1 | 3 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Acked-by: Fabiano Rosas <farosas@suse.de> | ||
6 | Reviewed-by: Eric Blake <eblake@redhat.com> | ||
7 | Message-ID: <20250204211407.381505-16-kwolf@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | tests/qemu-iotests/tests/qsd-migrate | 140 +++++++++++++++++++++++ | ||
12 | tests/qemu-iotests/tests/qsd-migrate.out | 59 ++++++++++ | ||
13 | 2 files changed, 199 insertions(+) | ||
14 | create mode 100755 tests/qemu-iotests/tests/qsd-migrate | ||
15 | create mode 100644 tests/qemu-iotests/tests/qsd-migrate.out | ||
16 | |||
17 | diff --git a/tests/qemu-iotests/tests/qsd-migrate b/tests/qemu-iotests/tests/qsd-migrate | ||
18 | new file mode 100755 | ||
19 | index XXXXXXX..XXXXXXX | ||
20 | --- /dev/null | ||
21 | +++ b/tests/qemu-iotests/tests/qsd-migrate | ||
22 | @@ -XXX,XX +XXX,XX @@ | ||
23 | +#!/usr/bin/env python3 | ||
24 | +# group: rw quick | ||
25 | +# | ||
26 | +# Copyright (C) Red Hat, Inc. | ||
27 | +# | ||
28 | +# This program is free software; you can redistribute it and/or modify | ||
29 | +# it under the terms of the GNU General Public License as published by | ||
30 | +# the Free Software Foundation; either version 2 of the License, or | ||
31 | +# (at your option) any later version. | ||
32 | +# | ||
33 | +# This program is distributed in the hope that it will be useful, | ||
34 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
35 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
36 | +# GNU General Public License for more details. | ||
37 | +# | ||
38 | +# You should have received a copy of the GNU General Public License | ||
39 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
40 | +# | ||
41 | +# Creator/Owner: Kevin Wolf <kwolf@redhat.com> | ||
42 | + | ||
43 | +import iotests | ||
44 | + | ||
45 | +from iotests import filter_qemu_io, filter_qtest | ||
46 | + | ||
47 | +iotests.script_initialize(supported_fmts=['generic'], | ||
48 | + supported_protocols=['file'], | ||
49 | + supported_platforms=['linux']) | ||
50 | + | ||
51 | +with iotests.FilePath('disk.img') as path, \ | ||
52 | + iotests.FilePath('nbd-src.sock', base_dir=iotests.sock_dir) as nbd_src, \ | ||
53 | + iotests.FilePath('nbd-dst.sock', base_dir=iotests.sock_dir) as nbd_dst, \ | ||
54 | + iotests.FilePath('migrate.sock', base_dir=iotests.sock_dir) as mig_sock, \ | ||
55 | + iotests.VM(path_suffix="-src") as vm_src, \ | ||
56 | + iotests.VM(path_suffix="-dst") as vm_dst: | ||
57 | + | ||
58 | + img_size = '10M' | ||
59 | + | ||
60 | + iotests.log('Preparing disk...') | ||
61 | + iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size) | ||
62 | + | ||
63 | + iotests.log('Launching source QSD...') | ||
64 | + qsd_src = iotests.QemuStorageDaemon( | ||
65 | + '--blockdev', f'file,node-name=disk-file,filename={path}', | ||
66 | + '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt', | ||
67 | + '--nbd-server', f'addr.type=unix,addr.path={nbd_src}', | ||
68 | + '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,' | ||
69 | + 'allow-inactive=true', | ||
70 | + qmp=True, | ||
71 | + ) | ||
72 | + | ||
73 | + iotests.log('Launching source VM...') | ||
74 | + vm_src.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,' | ||
75 | + f'server.path={nbd_src},export=disk-fmt') | ||
76 | + vm_src.add_args('-device', 'virtio-blk,drive=disk,id=virtio0') | ||
77 | + vm_src.launch() | ||
78 | + | ||
79 | + iotests.log('Launching destination QSD...') | ||
80 | + qsd_dst = iotests.QemuStorageDaemon( | ||
81 | + '--blockdev', f'file,node-name=disk-file,filename={path},active=off', | ||
82 | + '--blockdev', f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,' | ||
83 | + f'active=off', | ||
84 | + '--nbd-server', f'addr.type=unix,addr.path={nbd_dst}', | ||
85 | + '--export', 'nbd,id=exp0,node-name=disk-fmt,writable=true,' | ||
86 | + 'allow-inactive=true', | ||
87 | + qmp=True, | ||
88 | + instance_id='b', | ||
89 | + ) | ||
90 | + | ||
91 | + iotests.log('Launching destination VM...') | ||
92 | + vm_dst.add_args('-blockdev', f'nbd,node-name=disk,server.type=unix,' | ||
93 | + f'server.path={nbd_dst},export=disk-fmt') | ||
94 | + vm_dst.add_args('-device', 'virtio-blk,drive=disk,id=virtio0') | ||
95 | + vm_dst.add_args('-incoming', f'unix:{mig_sock}') | ||
96 | + vm_dst.launch() | ||
97 | + | ||
98 | + iotests.log('\nTest I/O on the source') | ||
99 | + vm_src.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x11 0 4k', | ||
100 | + use_log=True, qdev=True) | ||
101 | + vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k', | ||
102 | + use_log=True, qdev=True) | ||
103 | + | ||
104 | + iotests.log('\nStarting migration...') | ||
105 | + | ||
106 | + mig_caps = [ | ||
107 | + {'capability': 'events', 'state': True}, | ||
108 | + {'capability': 'pause-before-switchover', 'state': True}, | ||
109 | + ] | ||
110 | + vm_src.qmp_log('migrate-set-capabilities', capabilities=mig_caps) | ||
111 | + vm_dst.qmp_log('migrate-set-capabilities', capabilities=mig_caps) | ||
112 | + vm_src.qmp_log('migrate', uri=f'unix:{mig_sock}', | ||
113 | + filters=[iotests.filter_qmp_testfiles]) | ||
114 | + | ||
115 | + vm_src.event_wait('MIGRATION', | ||
116 | + match={'data': {'status': 'pre-switchover'}}) | ||
117 | + | ||
118 | + iotests.log('\nPre-switchover: Reconfigure QSD instances') | ||
119 | + | ||
120 | + iotests.log(qsd_src.qmp('blockdev-set-active', {'active': False})) | ||
121 | + | ||
122 | + # Reading is okay from both sides while the image is inactive. Note that | ||
123 | + # the destination may have stale data until it activates the image, though. | ||
124 | + vm_src.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k', | ||
125 | + use_log=True, qdev=True) | ||
126 | + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read 0 4k', | ||
127 | + use_log=True, qdev=True) | ||
128 | + | ||
129 | + iotests.log(qsd_dst.qmp('blockdev-set-active', {'active': True})) | ||
130 | + | ||
131 | + iotests.log('\nCompleting migration...') | ||
132 | + | ||
133 | + vm_src.qmp_log('migrate-continue', state='pre-switchover') | ||
134 | + vm_dst.event_wait('MIGRATION', match={'data': {'status': 'completed'}}) | ||
135 | + | ||
136 | + iotests.log('\nTest I/O on the destination') | ||
137 | + | ||
138 | + # Now the destination must see what the source wrote | ||
139 | + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x11 0 4k', | ||
140 | + use_log=True, qdev=True) | ||
141 | + | ||
142 | + # And be able to overwrite it | ||
143 | + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'write -P 0x22 0 4k', | ||
144 | + use_log=True, qdev=True) | ||
145 | + vm_dst.hmp_qemu_io('virtio0/virtio-backend', 'read -P 0x22 0 4k', | ||
146 | + use_log=True, qdev=True) | ||
147 | + | ||
148 | + iotests.log('\nDone') | ||
149 | + | ||
150 | + vm_src.shutdown() | ||
151 | + iotests.log('\n--- vm_src log ---') | ||
152 | + log = vm_src.get_log() | ||
153 | + if log: | ||
154 | + iotests.log(log, [filter_qtest, filter_qemu_io]) | ||
155 | + qsd_src.stop() | ||
156 | + | ||
157 | + vm_dst.shutdown() | ||
158 | + iotests.log('\n--- vm_dst log ---') | ||
159 | + log = vm_dst.get_log() | ||
160 | + if log: | ||
161 | + iotests.log(log, [filter_qtest, filter_qemu_io]) | ||
162 | + qsd_dst.stop() | ||
163 | diff --git a/tests/qemu-iotests/tests/qsd-migrate.out b/tests/qemu-iotests/tests/qsd-migrate.out | ||
164 | new file mode 100644 | ||
165 | index XXXXXXX..XXXXXXX | ||
166 | --- /dev/null | ||
167 | +++ b/tests/qemu-iotests/tests/qsd-migrate.out | ||
168 | @@ -XXX,XX +XXX,XX @@ | ||
169 | +Preparing disk... | ||
170 | +Launching source QSD... | ||
171 | +Launching source VM... | ||
172 | +Launching destination QSD... | ||
173 | +Launching destination VM... | ||
174 | + | ||
175 | +Test I/O on the source | ||
176 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x11 0 4k\""}} | ||
177 | +{"return": ""} | ||
178 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}} | ||
179 | +{"return": ""} | ||
180 | + | ||
181 | +Starting migration... | ||
182 | +{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}} | ||
183 | +{"return": {}} | ||
184 | +{"execute": "migrate-set-capabilities", "arguments": {"capabilities": [{"capability": "events", "state": true}, {"capability": "pause-before-switchover", "state": true}]}} | ||
185 | +{"return": {}} | ||
186 | +{"execute": "migrate", "arguments": {"uri": "unix:SOCK_DIR/PID-migrate.sock"}} | ||
187 | +{"return": {}} | ||
188 | + | ||
189 | +Pre-switchover: Reconfigure QSD instances | ||
190 | +{"return": {}} | ||
191 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}} | ||
192 | +{"return": ""} | ||
193 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read 0 4k\""}} | ||
194 | +{"return": ""} | ||
195 | +{"return": {}} | ||
196 | + | ||
197 | +Completing migration... | ||
198 | +{"execute": "migrate-continue", "arguments": {"state": "pre-switchover"}} | ||
199 | +{"return": {}} | ||
200 | + | ||
201 | +Test I/O on the destination | ||
202 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x11 0 4k\""}} | ||
203 | +{"return": ""} | ||
204 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"write -P 0x22 0 4k\""}} | ||
205 | +{"return": ""} | ||
206 | +{"execute": "human-monitor-command", "arguments": {"command-line": "qemu-io -d virtio0/virtio-backend \"read -P 0x22 0 4k\""}} | ||
207 | +{"return": ""} | ||
208 | + | ||
209 | +Done | ||
210 | + | ||
211 | +--- vm_src log --- | ||
212 | +wrote 4096/4096 bytes at offset 0 | ||
213 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
214 | +read 4096/4096 bytes at offset 0 | ||
215 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
216 | +read 4096/4096 bytes at offset 0 | ||
217 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
218 | + | ||
219 | +--- vm_dst log --- | ||
220 | +read 4096/4096 bytes at offset 0 | ||
221 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
222 | +read 4096/4096 bytes at offset 0 | ||
223 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
224 | +wrote 4096/4096 bytes at offset 0 | ||
225 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
226 | +read 4096/4096 bytes at offset 0 | ||
227 | +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
228 | -- | ||
229 | 2.48.1 | ||
1 | From: Stefan Hajnoczi <stefanha@redhat.com> | 1 | This tests different types of operations on inactive block nodes |
---|---|---|---|
2 | (including graph changes, block jobs and NBD exports) to make sure that | ||
3 | users who manually activate and inactivate nodes don't break things. | ||
2 | 4 | ||
3 | The checks in vu_blk_sect_range_ok() assume VIRTIO_BLK_SECTOR_SIZE is | 5 | Support for inactive nodes in other export types will have to come with |
4 | equal to BDRV_SECTOR_SIZE. This is true, but let's add a | 6 | separate test cases because those export types have different dependencies, |
5 | QEMU_BUILD_BUG_ON() to make it explicit. | 7 | such as blkio or root permissions, and we don't want to disable this |
8 | basic test when those dependencies are not fulfilled. | ||
6 | 9 | ||
7 | We might as well check that the request buffer size is a multiple of | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
8 | VIRTIO_BLK_SECTOR_SIZE while we're at it. | 11 | Acked-by: Fabiano Rosas <farosas@suse.de> |
9 | 12 | Message-ID: <20250204211407.381505-17-kwolf@redhat.com> | |
10 | Suggested-by: Max Reitz <mreitz@redhat.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Message-Id: <20210331142727.391465-1-stefanha@redhat.com> | ||
13 | Reviewed-by: Eric Blake <eblake@redhat.com> | 13 | Reviewed-by: Eric Blake <eblake@redhat.com> |
14 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 15 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
15 | --- | 16 | --- |
16 | block/export/vhost-user-blk-server.c | 9 ++++++++- | 17 | tests/qemu-iotests/iotests.py | 4 + |
17 | 1 file changed, 8 insertions(+), 1 deletion(-) | 18 | tests/qemu-iotests/tests/inactive-node-nbd | 303 ++++++++++++++++++ |
19 | .../qemu-iotests/tests/inactive-node-nbd.out | 239 ++++++++++++++ | ||
20 | 3 files changed, 546 insertions(+) | ||
21 | create mode 100755 tests/qemu-iotests/tests/inactive-node-nbd | ||
22 | create mode 100644 tests/qemu-iotests/tests/inactive-node-nbd.out | ||
18 | 23 | ||
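Before the diff itself, a condensed sketch of the pattern the new test exercises may help orientation. It is assembled only from helpers and QMP commands that appear in the patch below (blockdev creation with active=off, the new 'active' field of query-named-block-nodes, blockdev-set-active, and an NBD export with allow-inactive). It is an illustration, not part of the patch, and it omits the error paths the real test covers.

    import iotests

    with iotests.FilePath('disk.img') as path, \
         iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock, \
         iotests.VM() as vm:

        iotests.qemu_img_create('-f', iotests.imgfmt, path, '10M')

        # Create the format node inactive from the start
        vm.add_blockdev(f'file,node-name=disk-file,filename={path}')
        vm.add_blockdev(f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,'
                        'active=off')
        vm.add_paused()    # '-S': running the guest would activate all images
        vm.launch()

        # The activation state is reported in the new 'active' field
        nodes = vm.cmd('query-named-block-nodes', flat=True)
        active = next(n['active'] for n in nodes if n['node-name'] == 'disk-fmt')
        iotests.log('disk-fmt active: %s' % active)

        # Toggle the state explicitly
        vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True)
        vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False)

        # With allow-inactive=true, an NBD export does not activate the node
        vm.qmp_log('nbd-server-start',
                   addr={'type': 'unix', 'data': {'path': nbd_sock}})
        vm.qmp_log('block-export-add', id='exp0', type='nbd',
                   node_name='disk-fmt', writable=True, allow_inactive=True)

        vm.shutdown()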
19 | diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c | 24 | diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py |
20 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/block/export/vhost-user-blk-server.c | 26 | --- a/tests/qemu-iotests/iotests.py |
22 | +++ b/block/export/vhost-user-blk-server.c | 27 | +++ b/tests/qemu-iotests/iotests.py |
23 | @@ -XXX,XX +XXX,XX @@ static void vu_blk_req_complete(VuBlkReq *req) | 28 | @@ -XXX,XX +XXX,XX @@ def add_incoming(self, addr): |
24 | static bool vu_blk_sect_range_ok(VuBlkExport *vexp, uint64_t sector, | 29 | self._args.append(addr) |
25 | size_t size) | 30 | return self |
26 | { | 31 | |
27 | - uint64_t nb_sectors = size >> BDRV_SECTOR_BITS; | 32 | + def add_paused(self): |
28 | + uint64_t nb_sectors; | 33 | + self._args.append('-S') |
29 | uint64_t total_sectors; | 34 | + return self |
30 | 35 | + | |
31 | + if (size % VIRTIO_BLK_SECTOR_SIZE) { | 36 | def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage: |
32 | + return false; | 37 | cmd = 'human-monitor-command' |
38 | kwargs: Dict[str, Any] = {'command-line': command_line} | ||
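The add_paused() helper added above simply appends '-S', so QEMU starts with the guest paused. The test below depends on this: running the guest activates all images, so without '-S' the nodes created with active=off would not stay inactive. A minimal usage sketch (illustrative only, with a placeholder image path):

    vm = iotests.VM()
    vm.add_blockdev('file,node-name=disk-file,filename=/path/to/disk.img')
    vm.add_paused()   # append '-S': the guest is not started automatically
    vm.launch()       # blockdevs created with active=off remain inactive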
39 | diff --git a/tests/qemu-iotests/tests/inactive-node-nbd b/tests/qemu-iotests/tests/inactive-node-nbd | ||
40 | new file mode 100755 | ||
41 | index XXXXXXX..XXXXXXX | ||
42 | --- /dev/null | ||
43 | +++ b/tests/qemu-iotests/tests/inactive-node-nbd | ||
44 | @@ -XXX,XX +XXX,XX @@ | ||
45 | +#!/usr/bin/env python3 | ||
46 | +# group: rw quick | ||
47 | +# | ||
48 | +# Copyright (C) Red Hat, Inc. | ||
49 | +# | ||
50 | +# This program is free software; you can redistribute it and/or modify | ||
51 | +# it under the terms of the GNU General Public License as published by | ||
52 | +# the Free Software Foundation; either version 2 of the License, or | ||
53 | +# (at your option) any later version. | ||
54 | +# | ||
55 | +# This program is distributed in the hope that it will be useful, | ||
56 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
57 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
58 | +# GNU General Public License for more details. | ||
59 | +# | ||
60 | +# You should have received a copy of the GNU General Public License | ||
61 | +# along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
62 | +# | ||
63 | +# Creator/Owner: Kevin Wolf <kwolf@redhat.com> | ||
64 | + | ||
65 | +import iotests | ||
66 | + | ||
67 | +from iotests import QemuIoInteractive | ||
68 | +from iotests import filter_qemu_io, filter_qtest, filter_qmp_testfiles | ||
69 | + | ||
70 | +iotests.script_initialize(supported_fmts=['generic'], | ||
71 | + supported_protocols=['file'], | ||
72 | + supported_platforms=['linux']) | ||
73 | + | ||
74 | +def get_export(node_name='disk-fmt', allow_inactive=None): | ||
75 | + exp = { | ||
76 | + 'id': 'exp0', | ||
77 | + 'type': 'nbd', | ||
78 | + 'node-name': node_name, | ||
79 | + 'writable': True, | ||
33 | + } | 80 | + } |
34 | + | 81 | + |
35 | + nb_sectors = size >> VIRTIO_BLK_SECTOR_BITS; | 82 | + if allow_inactive is not None: |
36 | + | 83 | + exp['allow-inactive'] = allow_inactive |
37 | + QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != VIRTIO_BLK_SECTOR_SIZE); | 84 | + |
38 | if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) { | 85 | + return exp |
39 | return false; | 86 | + |
40 | } | 87 | +def node_is_active(_vm, node_name): |
88 | + nodes = _vm.cmd('query-named-block-nodes', flat=True) | ||
89 | + node = next(n for n in nodes if n['node-name'] == node_name) | ||
90 | + return node['active'] | ||
91 | + | ||
92 | +with iotests.FilePath('disk.img') as path, \ | ||
93 | + iotests.FilePath('snap.qcow2') as snap_path, \ | ||
94 | + iotests.FilePath('snap2.qcow2') as snap2_path, \ | ||
95 | + iotests.FilePath('target.img') as target_path, \ | ||
96 | + iotests.FilePath('nbd.sock', base_dir=iotests.sock_dir) as nbd_sock, \ | ||
97 | + iotests.VM() as vm: | ||
98 | + | ||
99 | + img_size = '10M' | ||
100 | + | ||
101 | + iotests.log('Preparing disk...') | ||
102 | + iotests.qemu_img_create('-f', iotests.imgfmt, path, img_size) | ||
103 | + iotests.qemu_img_create('-f', iotests.imgfmt, target_path, img_size) | ||
104 | + | ||
105 | + iotests.qemu_img_create('-f', 'qcow2', '-b', path, '-F', iotests.imgfmt, | ||
106 | + snap_path) | ||
107 | + iotests.qemu_img_create('-f', 'qcow2', '-b', snap_path, '-F', 'qcow2', | ||
108 | + snap2_path) | ||
109 | + | ||
110 | + iotests.log('Launching VM...') | ||
111 | + vm.add_blockdev(f'file,node-name=disk-file,filename={path}') | ||
112 | + vm.add_blockdev(f'{iotests.imgfmt},file=disk-file,node-name=disk-fmt,' | ||
113 | + 'active=off') | ||
114 | + vm.add_blockdev(f'file,node-name=target-file,filename={target_path}') | ||
115 | + vm.add_blockdev(f'{iotests.imgfmt},file=target-file,node-name=target-fmt') | ||
116 | + vm.add_blockdev(f'file,node-name=snap-file,filename={snap_path}') | ||
117 | + vm.add_blockdev(f'file,node-name=snap2-file,filename={snap2_path}') | ||
118 | + | ||
119 | + # Actually running the VM activates all images | ||
120 | + vm.add_paused() | ||
121 | + | ||
122 | + vm.launch() | ||
123 | + vm.qmp_log('nbd-server-start', | ||
124 | + addr={'type': 'unix', 'data':{'path': nbd_sock}}, | ||
125 | + filters=[filter_qmp_testfiles]) | ||
126 | + | ||
127 | + iotests.log('\n=== Creating export of inactive node ===') | ||
128 | + | ||
129 | + iotests.log('\nExports activate nodes without allow-inactive') | ||
130 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
131 | + vm.qmp_log('block-export-add', **get_export()) | ||
132 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
133 | + vm.qmp_log('query-block-exports') | ||
134 | + vm.qmp_log('block-export-del', id='exp0') | ||
135 | + vm.event_wait('BLOCK_EXPORT_DELETED') | ||
136 | + vm.qmp_log('query-block-exports') | ||
137 | + | ||
138 | + iotests.log('\nExports activate nodes with allow-inactive=false') | ||
139 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) | ||
140 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
141 | + vm.qmp_log('block-export-add', **get_export(allow_inactive=False)) | ||
142 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
143 | + vm.qmp_log('query-block-exports') | ||
144 | + vm.qmp_log('block-export-del', id='exp0') | ||
145 | + vm.event_wait('BLOCK_EXPORT_DELETED') | ||
146 | + vm.qmp_log('query-block-exports') | ||
147 | + | ||
148 | + iotests.log('\nExport leaves nodes inactive with allow-inactive=true') | ||
149 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) | ||
150 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
151 | + vm.qmp_log('block-export-add', **get_export(allow_inactive=True)) | ||
152 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
153 | + vm.qmp_log('query-block-exports') | ||
154 | + vm.qmp_log('block-export-del', id='exp0') | ||
155 | + vm.event_wait('BLOCK_EXPORT_DELETED') | ||
156 | + vm.qmp_log('query-block-exports') | ||
157 | + | ||
158 | + iotests.log('\n=== Inactivating node with existing export ===') | ||
159 | + | ||
160 | + iotests.log('\nInactivating nodes with an export fails without ' | ||
161 | + 'allow-inactive') | ||
162 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) | ||
163 | + vm.qmp_log('block-export-add', **get_export(node_name='disk-fmt')) | ||
164 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) | ||
165 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
166 | + vm.qmp_log('query-block-exports') | ||
167 | + vm.qmp_log('block-export-del', id='exp0') | ||
168 | + vm.event_wait('BLOCK_EXPORT_DELETED') | ||
169 | + vm.qmp_log('query-block-exports') | ||
170 | + | ||
171 | + iotests.log('\nInactivating nodes with an export fails with ' | ||
172 | + 'allow-inactive=false') | ||
173 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) | ||
174 | + vm.qmp_log('block-export-add', | ||
175 | + **get_export(node_name='disk-fmt', allow_inactive=False)) | ||
176 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) | ||
177 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
178 | + vm.qmp_log('query-block-exports') | ||
179 | + vm.qmp_log('block-export-del', id='exp0') | ||
180 | + vm.event_wait('BLOCK_EXPORT_DELETED') | ||
181 | + vm.qmp_log('query-block-exports') | ||
182 | + | ||
183 | + iotests.log('\nInactivating nodes with an export works with ' | ||
184 | + 'allow-inactive=true') | ||
185 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) | ||
186 | + vm.qmp_log('block-export-add', | ||
187 | + **get_export(node_name='disk-fmt', allow_inactive=True)) | ||
188 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) | ||
189 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
190 | + vm.qmp_log('query-block-exports') | ||
191 | + vm.qmp_log('block-export-del', id='exp0') | ||
192 | + vm.event_wait('BLOCK_EXPORT_DELETED') | ||
193 | + vm.qmp_log('query-block-exports') | ||
194 | + | ||
195 | + iotests.log('\n=== Inactive nodes with parent ===') | ||
196 | + | ||
197 | + iotests.log('\nInactivating nodes with an active parent fails') | ||
198 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) | ||
199 | + vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False) | ||
200 | + iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file')) | ||
201 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
202 | + | ||
203 | + iotests.log('\nInactivating nodes with an inactive parent works') | ||
204 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=False) | ||
205 | + vm.qmp_log('blockdev-set-active', node_name='disk-file', active=False) | ||
206 | + iotests.log('disk-file active: %s' % node_is_active(vm, 'disk-file')) | ||
207 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
208 | + | ||
209 | + iotests.log('\nCreating active parent node with an inactive child fails') | ||
210 | + vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt', | ||
211 | + node_name='disk-filter') | ||
212 | + vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt', | ||
213 | + node_name='disk-filter', active=True) | ||
214 | + | ||
215 | + iotests.log('\nCreating inactive parent node with an inactive child works') | ||
216 | + vm.qmp_log('blockdev-add', driver='raw', file='disk-fmt', | ||
217 | + node_name='disk-filter', active=False) | ||
218 | + vm.qmp_log('blockdev-del', node_name='disk-filter') | ||
219 | + | ||
220 | + iotests.log('\n=== Resizing an inactive node ===') | ||
221 | + vm.qmp_log('block_resize', node_name='disk-fmt', size=16*1024*1024) | ||
222 | + | ||
223 | + iotests.log('\n=== Taking a snapshot of an inactive node ===') | ||
224 | + | ||
225 | + iotests.log('\nActive overlay over inactive backing file automatically ' | ||
226 | + 'makes both inactive for compatibility') | ||
227 | + vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt', | ||
228 | + file='snap-file', backing=None) | ||
229 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
230 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
231 | + vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt') | ||
232 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
233 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
234 | + vm.qmp_log('blockdev-del', node_name='snap-fmt') | ||
235 | + | ||
236 | + iotests.log('\nInactive overlay over inactive backing file just works') | ||
237 | + vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap-fmt', | ||
238 | + file='snap-file', backing=None, active=False) | ||
239 | + vm.qmp_log('blockdev-snapshot', node='disk-fmt', overlay='snap-fmt') | ||
240 | + | ||
241 | + iotests.log('\n=== Block jobs with inactive nodes ===') | ||
242 | + | ||
243 | + iotests.log('\nStreaming into an inactive node') | ||
244 | + vm.qmp_log('block-stream', device='snap-fmt', | ||
245 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
246 | + | ||
247 | + iotests.log('\nCommitting an inactive root node (active commit)') | ||
248 | + vm.qmp_log('block-commit', job_id='job0', device='snap-fmt', | ||
249 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
250 | + | ||
251 | + iotests.log('\nCommitting an inactive intermediate node to inactive base') | ||
252 | + vm.qmp_log('blockdev-add', driver='qcow2', node_name='snap2-fmt', | ||
253 | + file='snap2-file', backing='snap-fmt', active=False) | ||
254 | + | ||
255 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
256 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
257 | + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) | ||
258 | + | ||
259 | + vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt', | ||
260 | + top_node='snap-fmt', | ||
261 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
262 | + | ||
263 | + iotests.log('\nCommitting an inactive intermediate node to active base') | ||
264 | + vm.qmp_log('blockdev-set-active', node_name='disk-fmt', active=True) | ||
265 | + vm.qmp_log('block-commit', job_id='job0', device='snap2-fmt', | ||
266 | + top_node='snap-fmt', | ||
267 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
268 | + | ||
269 | + iotests.log('\nMirror from inactive source to active target') | ||
270 | + vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt', | ||
271 | + target='target-fmt', sync='full', | ||
272 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
273 | + | ||
274 | + iotests.log('\nMirror from active source to inactive target') | ||
275 | + | ||
276 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
277 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
278 | + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) | ||
279 | + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) | ||
280 | + | ||
281 | + # Activating snap2-fmt recursively activates the whole backing chain | ||
282 | + vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=True) | ||
283 | + vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False) | ||
284 | + | ||
285 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
286 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
287 | + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) | ||
288 | + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) | ||
289 | + | ||
290 | + vm.qmp_log('blockdev-mirror', job_id='job0', device='snap2-fmt', | ||
291 | + target='target-fmt', sync='full', | ||
292 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
293 | + | ||
294 | + iotests.log('\nBackup from active source to inactive target') | ||
295 | + | ||
296 | + vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt', | ||
297 | + target='target-fmt', sync='full', | ||
298 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
299 | + | ||
300 | + iotests.log('\nBackup from inactive source to active target') | ||
301 | + | ||
302 | + # Inactivating snap2-fmt recursively inactivates the whole backing chain | ||
303 | + vm.qmp_log('blockdev-set-active', node_name='snap2-fmt', active=False) | ||
304 | + vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=True) | ||
305 | + | ||
306 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
307 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
308 | + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) | ||
309 | + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) | ||
310 | + | ||
311 | + vm.qmp_log('blockdev-backup', job_id='job0', device='snap2-fmt', | ||
312 | + target='target-fmt', sync='full', | ||
313 | + filters=[iotests.filter_qmp_generated_node_ids]) | ||
314 | + | ||
315 | + iotests.log('\n=== Accessing export on inactive node ===') | ||
316 | + | ||
317 | + # Use the target node because it has the right image format and isn't the | ||
318 | + # (read-only) backing file of a qcow2 node | ||
319 | + vm.qmp_log('blockdev-set-active', node_name='target-fmt', active=False) | ||
320 | + vm.qmp_log('block-export-add', | ||
321 | + **get_export(node_name='target-fmt', allow_inactive=True)) | ||
322 | + | ||
323 | + # The read should succeed, everything else should fail gracefully | ||
324 | + qemu_io = QemuIoInteractive('-f', 'raw', | ||
325 | + f'nbd+unix:///target-fmt?socket={nbd_sock}') | ||
326 | + iotests.log(qemu_io.cmd('read 0 64k'), filters=[filter_qemu_io]) | ||
327 | + iotests.log(qemu_io.cmd('write 0 64k'), filters=[filter_qemu_io]) | ||
328 | + iotests.log(qemu_io.cmd('write -z 0 64k'), filters=[filter_qemu_io]) | ||
329 | + iotests.log(qemu_io.cmd('write -zu 0 64k'), filters=[filter_qemu_io]) | ||
330 | + iotests.log(qemu_io.cmd('discard 0 64k'), filters=[filter_qemu_io]) | ||
331 | + iotests.log(qemu_io.cmd('flush'), filters=[filter_qemu_io]) | ||
332 | + iotests.log(qemu_io.cmd('map'), filters=[filter_qemu_io]) | ||
333 | + qemu_io.close() | ||
334 | + | ||
335 | + iotests.log('\n=== Resuming VM activates all images ===') | ||
336 | + vm.qmp_log('cont') | ||
337 | + | ||
338 | + iotests.log('disk-fmt active: %s' % node_is_active(vm, 'disk-fmt')) | ||
339 | + iotests.log('snap-fmt active: %s' % node_is_active(vm, 'snap-fmt')) | ||
340 | + iotests.log('snap2-fmt active: %s' % node_is_active(vm, 'snap2-fmt')) | ||
341 | + iotests.log('target-fmt active: %s' % node_is_active(vm, 'target-fmt')) | ||
342 | + | ||
343 | + iotests.log('\nShutting down...') | ||
344 | + vm.shutdown() | ||
345 | + log = vm.get_log() | ||
346 | + if log: | ||
347 | + iotests.log(log, [filter_qtest, filter_qemu_io]) | ||
348 | diff --git a/tests/qemu-iotests/tests/inactive-node-nbd.out b/tests/qemu-iotests/tests/inactive-node-nbd.out | ||
349 | new file mode 100644 | ||
350 | index XXXXXXX..XXXXXXX | ||
351 | --- /dev/null | ||
352 | +++ b/tests/qemu-iotests/tests/inactive-node-nbd.out | ||
353 | @@ -XXX,XX +XXX,XX @@ | ||
354 | +Preparing disk... | ||
355 | +Launching VM... | ||
356 | +{"execute": "nbd-server-start", "arguments": {"addr": {"data": {"path": "SOCK_DIR/PID-nbd.sock"}, "type": "unix"}}} | ||
357 | +{"return": {}} | ||
358 | + | ||
359 | +=== Creating export of inactive node === | ||
360 | + | ||
361 | +Exports activate nodes without allow-inactive | ||
362 | +disk-fmt active: False | ||
363 | +{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} | ||
364 | +{"return": {}} | ||
365 | +disk-fmt active: True | ||
366 | +{"execute": "query-block-exports", "arguments": {}} | ||
367 | +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} | ||
368 | +{"execute": "block-export-del", "arguments": {"id": "exp0"}} | ||
369 | +{"return": {}} | ||
370 | +{"execute": "query-block-exports", "arguments": {}} | ||
371 | +{"return": []} | ||
372 | + | ||
373 | +Exports activate nodes with allow-inactive=false | ||
374 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} | ||
375 | +{"return": {}} | ||
376 | +disk-fmt active: False | ||
377 | +{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} | ||
378 | +{"return": {}} | ||
379 | +disk-fmt active: True | ||
380 | +{"execute": "query-block-exports", "arguments": {}} | ||
381 | +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} | ||
382 | +{"execute": "block-export-del", "arguments": {"id": "exp0"}} | ||
383 | +{"return": {}} | ||
384 | +{"execute": "query-block-exports", "arguments": {}} | ||
385 | +{"return": []} | ||
386 | + | ||
387 | +Export leaves nodes inactive with allow-inactive=true | ||
388 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} | ||
389 | +{"return": {}} | ||
390 | +disk-fmt active: False | ||
391 | +{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} | ||
392 | +{"return": {}} | ||
393 | +disk-fmt active: False | ||
394 | +{"execute": "query-block-exports", "arguments": {}} | ||
395 | +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} | ||
396 | +{"execute": "block-export-del", "arguments": {"id": "exp0"}} | ||
397 | +{"return": {}} | ||
398 | +{"execute": "query-block-exports", "arguments": {}} | ||
399 | +{"return": []} | ||
400 | + | ||
401 | +=== Inactivating node with existing export === | ||
402 | + | ||
403 | +Inactivating nodes with an export fails without allow-inactive | ||
404 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} | ||
405 | +{"return": {}} | ||
406 | +{"execute": "block-export-add", "arguments": {"id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} | ||
407 | +{"return": {}} | ||
408 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} | ||
409 | +{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}} | ||
410 | +disk-fmt active: True | ||
411 | +{"execute": "query-block-exports", "arguments": {}} | ||
412 | +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} | ||
413 | +{"execute": "block-export-del", "arguments": {"id": "exp0"}} | ||
414 | +{"return": {}} | ||
415 | +{"execute": "query-block-exports", "arguments": {}} | ||
416 | +{"return": []} | ||
417 | + | ||
418 | +Inactivating nodes with an export fails with allow-inactive=false | ||
419 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} | ||
420 | +{"return": {}} | ||
421 | +{"execute": "block-export-add", "arguments": {"allow-inactive": false, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} | ||
422 | +{"return": {}} | ||
423 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} | ||
424 | +{"error": {"class": "GenericError", "desc": "Failed to inactivate node: Operation not permitted"}} | ||
425 | +disk-fmt active: True | ||
426 | +{"execute": "query-block-exports", "arguments": {}} | ||
427 | +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} | ||
428 | +{"execute": "block-export-del", "arguments": {"id": "exp0"}} | ||
429 | +{"return": {}} | ||
430 | +{"execute": "query-block-exports", "arguments": {}} | ||
431 | +{"return": []} | ||
432 | + | ||
433 | +Inactivating nodes with an export works with allow-inactive=true | ||
434 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} | ||
435 | +{"return": {}} | ||
436 | +{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "disk-fmt", "type": "nbd", "writable": true}} | ||
437 | +{"return": {}} | ||
438 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} | ||
439 | +{"return": {}} | ||
440 | +disk-fmt active: False | ||
441 | +{"execute": "query-block-exports", "arguments": {}} | ||
442 | +{"return": [{"id": "exp0", "node-name": "disk-fmt", "shutting-down": false, "type": "nbd"}]} | ||
443 | +{"execute": "block-export-del", "arguments": {"id": "exp0"}} | ||
444 | +{"return": {}} | ||
445 | +{"execute": "query-block-exports", "arguments": {}} | ||
446 | +{"return": []} | ||
447 | + | ||
448 | +=== Inactive nodes with parent === | ||
449 | + | ||
450 | +Inactivating nodes with an active parent fails | ||
451 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} | ||
452 | +{"return": {}} | ||
453 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}} | ||
454 | +{"error": {"class": "GenericError", "desc": "Node has active parent node"}} | ||
455 | +disk-file active: True | ||
456 | +disk-fmt active: True | ||
457 | + | ||
458 | +Inactivating nodes with an inactive parent works | ||
459 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-fmt"}} | ||
460 | +{"return": {}} | ||
461 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "disk-file"}} | ||
462 | +{"return": {}} | ||
463 | +disk-file active: False | ||
464 | +disk-fmt active: False | ||
465 | + | ||
466 | +Creating active parent node with an inactive child fails | ||
467 | +{"execute": "blockdev-add", "arguments": {"driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}} | ||
468 | +{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}} | ||
469 | +{"execute": "blockdev-add", "arguments": {"active": true, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}} | ||
470 | +{"error": {"class": "GenericError", "desc": "Inactive 'disk-fmt' can't be a file child of active 'disk-filter'"}} | ||
471 | + | ||
472 | +Creating inactive parent node with an inactive child works | ||
473 | +{"execute": "blockdev-add", "arguments": {"active": false, "driver": "raw", "file": "disk-fmt", "node-name": "disk-filter"}} | ||
474 | +{"return": {}} | ||
475 | +{"execute": "blockdev-del", "arguments": {"node-name": "disk-filter"}} | ||
476 | +{"return": {}} | ||
477 | + | ||
478 | +=== Resizing an inactive node === | ||
479 | +{"execute": "block_resize", "arguments": {"node-name": "disk-fmt", "size": 16777216}} | ||
480 | +{"error": {"class": "GenericError", "desc": "Permission 'resize' unavailable on inactive node"}} | ||
481 | + | ||
482 | +=== Taking a snapshot of an inactive node === | ||
483 | + | ||
484 | +Active overlay over inactive backing file automatically makes both inactive for compatibility | ||
485 | +{"execute": "blockdev-add", "arguments": {"backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}} | ||
486 | +{"return": {}} | ||
487 | +disk-fmt active: False | ||
488 | +snap-fmt active: True | ||
489 | +{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}} | ||
490 | +{"return": {}} | ||
491 | +disk-fmt active: False | ||
492 | +snap-fmt active: False | ||
493 | +{"execute": "blockdev-del", "arguments": {"node-name": "snap-fmt"}} | ||
494 | +{"return": {}} | ||
495 | + | ||
496 | +Inactive overlay over inactive backing file just works | ||
497 | +{"execute": "blockdev-add", "arguments": {"active": false, "backing": null, "driver": "qcow2", "file": "snap-file", "node-name": "snap-fmt"}} | ||
498 | +{"return": {}} | ||
499 | +{"execute": "blockdev-snapshot", "arguments": {"node": "disk-fmt", "overlay": "snap-fmt"}} | ||
500 | +{"return": {}} | ||
501 | + | ||
502 | +=== Block jobs with inactive nodes === | ||
503 | + | ||
504 | +Streaming into an inactive node | ||
505 | +{"execute": "block-stream", "arguments": {"device": "snap-fmt"}} | ||
506 | +{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap-fmt' can't be a file child of active 'NODE_NAME'"}} | ||
507 | + | ||
508 | +Committing an inactive root node (active commit) | ||
509 | +{"execute": "block-commit", "arguments": {"device": "snap-fmt", "job-id": "job0"}} | ||
510 | +{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}} | ||
511 | + | ||
512 | +Committing an inactive intermediate node to inactive base | ||
513 | +{"execute": "blockdev-add", "arguments": {"active": false, "backing": "snap-fmt", "driver": "qcow2", "file": "snap2-file", "node-name": "snap2-fmt"}} | ||
514 | +{"return": {}} | ||
515 | +disk-fmt active: False | ||
516 | +snap-fmt active: False | ||
517 | +snap2-fmt active: False | ||
518 | +{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}} | ||
519 | +{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}} | ||
520 | + | ||
521 | +Committing an inactive intermediate node to active base | ||
522 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "disk-fmt"}} | ||
523 | +{"return": {}} | ||
524 | +{"execute": "block-commit", "arguments": {"device": "snap2-fmt", "job-id": "job0", "top-node": "snap-fmt"}} | ||
525 | +{"error": {"class": "GenericError", "desc": "Inactive 'snap-fmt' can't be a backing child of active 'NODE_NAME'"}} | ||
526 | + | ||
527 | +Mirror from inactive source to active target | ||
528 | +{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} | ||
529 | +{"error": {"class": "GenericError", "desc": "Inactive 'snap2-fmt' can't be a backing child of active 'NODE_NAME'"}} | ||
530 | + | ||
531 | +Mirror from active source to inactive target | ||
532 | +disk-fmt active: True | ||
533 | +snap-fmt active: False | ||
534 | +snap2-fmt active: False | ||
535 | +target-fmt active: True | ||
536 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "snap2-fmt"}} | ||
537 | +{"return": {}} | ||
538 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}} | ||
539 | +{"return": {}} | ||
540 | +disk-fmt active: True | ||
541 | +snap-fmt active: True | ||
542 | +snap2-fmt active: True | ||
543 | +target-fmt active: False | ||
544 | +{"execute": "blockdev-mirror", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} | ||
545 | +{"error": {"class": "GenericError", "desc": "Permission 'write' unavailable on inactive node"}} | ||
546 | + | ||
547 | +Backup from active source to inactive target | ||
548 | +{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} | ||
549 | +{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'target-fmt' can't be a target child of active 'NODE_NAME'"}} | ||
550 | + | ||
551 | +Backup from inactive source to active target | ||
552 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "snap2-fmt"}} | ||
553 | +{"return": {}} | ||
554 | +{"execute": "blockdev-set-active", "arguments": {"active": true, "node-name": "target-fmt"}} | ||
555 | +{"return": {}} | ||
556 | +disk-fmt active: False | ||
557 | +snap-fmt active: False | ||
558 | +snap2-fmt active: False | ||
559 | +target-fmt active: True | ||
560 | +{"execute": "blockdev-backup", "arguments": {"device": "snap2-fmt", "job-id": "job0", "sync": "full", "target": "target-fmt"}} | ||
561 | +{"error": {"class": "GenericError", "desc": "Could not create node: Inactive 'snap2-fmt' can't be a file child of active 'NODE_NAME'"}} | ||
562 | + | ||
563 | +=== Accessing export on inactive node === | ||
564 | +{"execute": "blockdev-set-active", "arguments": {"active": false, "node-name": "target-fmt"}} | ||
565 | +{"return": {}} | ||
566 | +{"execute": "block-export-add", "arguments": {"allow-inactive": true, "id": "exp0", "node-name": "target-fmt", "type": "nbd", "writable": true}} | ||
567 | +{"return": {}} | ||
568 | +read 65536/65536 bytes at offset 0 | ||
569 | +64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) | ||
570 | + | ||
571 | +write failed: Operation not permitted | ||
572 | + | ||
573 | +write failed: Operation not permitted | ||
574 | + | ||
575 | +write failed: Operation not permitted | ||
576 | + | ||
577 | +discard failed: Operation not permitted | ||
578 | + | ||
579 | + | ||
580 | +qemu-io: Failed to get allocation status: Operation not permitted | ||
581 | + | ||
582 | + | ||
583 | +=== Resuming VM activates all images === | ||
584 | +{"execute": "cont", "arguments": {}} | ||
585 | +{"return": {}} | ||
586 | +disk-fmt active: True | ||
587 | +snap-fmt active: True | ||
588 | +snap2-fmt active: True | ||
589 | +target-fmt active: True | ||
590 | + | ||
591 | +Shutting down... | ||
592 | + | ||
41 | -- | 593 | -- |
42 | 2.30.2 | 594 | 2.48.1 |
43 | |||
1 | From: Stefan Hajnoczi <stefanha@redhat.com> | 1 | From: Stefan Hajnoczi <stefanha@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Exercise input validation code paths in | 3 | BLOCK_OP_TYPE_DATAPLANE prevents BlockDriverState from being used by |
4 | block/export/vhost-user-blk-server.c. | 4 | virtio-blk/virtio-scsi with IOThread. Commit b112a65c52aa ("block: |
5 | declare blockjobs and dataplane friends!") eliminated the main reason | ||
6 | for this blocker in 2014. | ||
7 | |||
8 | Nowadays the block layer supports I/O from multiple AioContexts, so | ||
9 | there is even less reason to block IOThread users. Any legitimate | ||
10 | reasons related to interference would probably also apply to | ||
11 | non-IOThread users. | ||
12 | |||
13 | The only remaining users are bdrv_op_unblock(BLOCK_OP_TYPE_DATAPLANE) | ||
14 | calls after bdrv_op_block_all(). If we remove BLOCK_OP_TYPE_DATAPLANE, | ||
15 | their behavior doesn't change. | ||
16 | |||
17 | Existing bdrv_op_block_all() callers that don't explicitly unblock | ||
18 | BLOCK_OP_TYPE_DATAPLANE seem to leave it blocked simply because nobody | ||
19 | bothered to unblock it, not because keeping BLOCK_OP_TYPE_DATAPLANE | ||
20 | blocked is actually necessary. | ||
5 | 21 | ||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 22 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Message-Id: <20210309094106.196911-5-stefanha@redhat.com> | 23 | Message-ID: <20250203182529.269066-1-stefanha@redhat.com> |
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 24 | Reviewed-by: Eric Blake <eblake@redhat.com> |
9 | Message-Id: <20210322092327.150720-4-stefanha@redhat.com> | 25 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 26 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 27 | --- |
12 | tests/qtest/vhost-user-blk-test.c | 124 ++++++++++++++++++++++++++++++ | 28 | include/block/block-common.h | 1 - |
13 | 1 file changed, 124 insertions(+) | 29 | block/replication.c | 1 - |
30 | blockjob.c | 2 -- | ||
31 | hw/block/virtio-blk.c | 9 --------- | ||
32 | hw/scsi/virtio-scsi.c | 3 --- | ||
33 | 5 files changed, 16 deletions(-) | ||
14 | 34 | ||
15 | diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c | 35 | diff --git a/include/block/block-common.h b/include/block/block-common.h |
16 | index XXXXXXX..XXXXXXX 100644 | 36 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/tests/qtest/vhost-user-blk-test.c | 37 | --- a/include/block/block-common.h |
18 | +++ b/tests/qtest/vhost-user-blk-test.c | 38 | +++ b/include/block/block-common.h |
19 | @@ -XXX,XX +XXX,XX @@ static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d, | 39 | @@ -XXX,XX +XXX,XX @@ typedef enum BlockOpType { |
20 | return addr; | 40 | BLOCK_OP_TYPE_CHANGE, |
21 | } | 41 | BLOCK_OP_TYPE_COMMIT_SOURCE, |
22 | 42 | BLOCK_OP_TYPE_COMMIT_TARGET, | |
23 | +static void test_invalid_discard_write_zeroes(QVirtioDevice *dev, | 43 | - BLOCK_OP_TYPE_DATAPLANE, |
24 | + QGuestAllocator *alloc, | 44 | BLOCK_OP_TYPE_DRIVE_DEL, |
25 | + QTestState *qts, | 45 | BLOCK_OP_TYPE_EJECT, |
26 | + QVirtQueue *vq, | 46 | BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, |
27 | + uint32_t type) | 47 | diff --git a/block/replication.c b/block/replication.c |
28 | +{ | 48 | index XXXXXXX..XXXXXXX 100644 |
29 | + QVirtioBlkReq req; | 49 | --- a/block/replication.c |
30 | + struct virtio_blk_discard_write_zeroes dwz_hdr; | 50 | +++ b/block/replication.c |
31 | + struct virtio_blk_discard_write_zeroes dwz_hdr2[2]; | 51 | @@ -XXX,XX +XXX,XX @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, |
32 | + uint64_t req_addr; | 52 | return; |
33 | + uint32_t free_head; | 53 | } |
34 | + uint8_t status; | 54 | bdrv_op_block_all(top_bs, s->blocker); |
35 | + | 55 | - bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker); |
36 | + /* More than one dwz is not supported */ | 56 | |
37 | + req.type = type; | 57 | bdrv_graph_wrunlock(); |
38 | + req.data = (char *) dwz_hdr2; | 58 | |
39 | + dwz_hdr2[0].sector = 0; | 59 | diff --git a/blockjob.c b/blockjob.c |
40 | + dwz_hdr2[0].num_sectors = 1; | 60 | index XXXXXXX..XXXXXXX 100644 |
41 | + dwz_hdr2[0].flags = 0; | 61 | --- a/blockjob.c |
42 | + dwz_hdr2[1].sector = 1; | 62 | +++ b/blockjob.c |
43 | + dwz_hdr2[1].num_sectors = 1; | 63 | @@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, |
44 | + dwz_hdr2[1].flags = 0; | 64 | goto fail; |
45 | + | ||
46 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr2[0]); | ||
47 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr2[1]); | ||
48 | + | ||
49 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr2)); | ||
50 | + | ||
51 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
52 | + qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr2), false, true); | ||
53 | + qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr2), 1, true, | ||
54 | + false); | ||
55 | + | ||
56 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
57 | + | ||
58 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
59 | + QVIRTIO_BLK_TIMEOUT_US); | ||
60 | + status = readb(req_addr + 16 + sizeof(dwz_hdr2)); | ||
61 | + g_assert_cmpint(status, ==, VIRTIO_BLK_S_UNSUPP); | ||
62 | + | ||
63 | + guest_free(alloc, req_addr); | ||
64 | + | ||
65 | + /* num_sectors must be less than config->max_write_zeroes_sectors */ | ||
66 | + req.type = type; | ||
67 | + req.data = (char *) &dwz_hdr; | ||
68 | + dwz_hdr.sector = 0; | ||
69 | + dwz_hdr.num_sectors = 0xffffffff; | ||
70 | + dwz_hdr.flags = 0; | ||
71 | + | ||
72 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | ||
73 | + | ||
74 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | ||
75 | + | ||
76 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
77 | + qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true); | ||
78 | + qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, | ||
79 | + false); | ||
80 | + | ||
81 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
82 | + | ||
83 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
84 | + QVIRTIO_BLK_TIMEOUT_US); | ||
85 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | ||
86 | + g_assert_cmpint(status, ==, VIRTIO_BLK_S_IOERR); | ||
87 | + | ||
88 | + guest_free(alloc, req_addr); | ||
89 | + | ||
90 | + /* sector must be less than the device capacity */ | ||
91 | + req.type = type; | ||
92 | + req.data = (char *) &dwz_hdr; | ||
93 | + dwz_hdr.sector = TEST_IMAGE_SIZE / 512 + 1; | ||
94 | + dwz_hdr.num_sectors = 1; | ||
95 | + dwz_hdr.flags = 0; | ||
96 | + | ||
97 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | ||
98 | + | ||
99 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | ||
100 | + | ||
101 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
102 | + qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true); | ||
103 | + qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, | ||
104 | + false); | ||
105 | + | ||
106 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
107 | + | ||
108 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
109 | + QVIRTIO_BLK_TIMEOUT_US); | ||
110 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | ||
111 | + g_assert_cmpint(status, ==, VIRTIO_BLK_S_IOERR); | ||
112 | + | ||
113 | + guest_free(alloc, req_addr); | ||
114 | + | ||
115 | + /* reserved flag bits must be zero */ | ||
116 | + req.type = type; | ||
117 | + req.data = (char *) &dwz_hdr; | ||
118 | + dwz_hdr.sector = 0; | ||
119 | + dwz_hdr.num_sectors = 1; | ||
120 | + dwz_hdr.flags = ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP; | ||
121 | + | ||
122 | + virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); | ||
123 | + | ||
124 | + req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); | ||
125 | + | ||
126 | + free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true); | ||
127 | + qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true); | ||
128 | + qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, | ||
129 | + false); | ||
130 | + | ||
131 | + qvirtqueue_kick(qts, dev, vq, free_head); | ||
132 | + | ||
133 | + qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL, | ||
134 | + QVIRTIO_BLK_TIMEOUT_US); | ||
135 | + status = readb(req_addr + 16 + sizeof(dwz_hdr)); | ||
136 | + g_assert_cmpint(status, ==, VIRTIO_BLK_S_UNSUPP); | ||
137 | + | ||
138 | + guest_free(alloc, req_addr); | ||
139 | +} | ||
140 | + | ||
141 | /* Returns the request virtqueue so the caller can perform further tests */ | ||
142 | static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc) | ||
143 | { | ||
144 | @@ -XXX,XX +XXX,XX @@ static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc) | ||
145 | g_free(data); | ||
146 | |||
147 | guest_free(alloc, req_addr); | ||
148 | + | ||
149 | + test_invalid_discard_write_zeroes(dev, alloc, qts, vq, | ||
150 | + VIRTIO_BLK_T_WRITE_ZEROES); | ||
151 | } | 65 | } |
152 | 66 | ||
153 | if (features & (1u << VIRTIO_BLK_F_DISCARD)) { | 67 | - bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker); |
154 | @@ -XXX,XX +XXX,XX @@ static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc) | 68 | - |
155 | g_assert_cmpint(status, ==, 0); | 69 | if (!block_job_set_speed(job, speed, errp)) { |
156 | 70 | goto fail; | |
157 | guest_free(alloc, req_addr); | ||
158 | + | ||
159 | + test_invalid_discard_write_zeroes(dev, alloc, qts, vq, | ||
160 | + VIRTIO_BLK_T_DISCARD); | ||
161 | } | 71 | } |
162 | 72 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | |
163 | if (features & (1u << VIRTIO_F_ANY_LAYOUT)) { | 73 | index XXXXXXX..XXXXXXX 100644 |
74 | --- a/hw/block/virtio-blk.c | ||
75 | +++ b/hw/block/virtio-blk.c | ||
76 | @@ -XXX,XX +XXX,XX @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp) | ||
77 | error_setg(errp, "ioeventfd is required for iothread"); | ||
78 | return false; | ||
79 | } | ||
80 | - | ||
81 | - /* | ||
82 | - * If ioeventfd is (re-)enabled while the guest is running there could | ||
83 | - * be block jobs that can conflict. | ||
84 | - */ | ||
85 | - if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { | ||
86 | - error_prepend(errp, "cannot start virtio-blk ioeventfd: "); | ||
87 | - return false; | ||
88 | - } | ||
89 | } | ||
90 | |||
91 | s->vq_aio_context = g_new(AioContext *, conf->num_queues); | ||
92 | diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c | ||
93 | index XXXXXXX..XXXXXXX 100644 | ||
94 | --- a/hw/scsi/virtio-scsi.c | ||
95 | +++ b/hw/scsi/virtio-scsi.c | ||
96 | @@ -XXX,XX +XXX,XX @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, | ||
97 | int ret; | ||
98 | |||
99 | if (s->ctx && !s->dataplane_fenced) { | ||
100 | - if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { | ||
101 | - return; | ||
102 | - } | ||
103 | ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp); | ||
104 | if (ret < 0) { | ||
105 | return; | ||
164 | -- | 106 | -- |
165 | 2.30.2 | 107 | 2.48.1 |
166 | |||