The following changes since commit 52ed34cbddde1cb89b2ac263e758e349a77f21e1:

  Merge tag 'pull-request-2023-06-26' of https://gitlab.com/thuth/qemu into staging (2023-06-26 10:38:41 +0200)

are available in the Git repository at:

  https://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to 17362398ee1a7f04e8006a46333145d8b707fd35:

  block: use bdrv_co_debug_event in coroutine context (2023-06-28 09:46:34 +0200)

----------------------------------------------------------------
Block layer patches

- Re-enable the graph lock
- More fixes to coroutine_fn marking

----------------------------------------------------------------
Kevin Wolf (11):
      iotests: Test active commit with iothread and background I/O
      qdev-properties-system: Lock AioContext for blk_insert_bs()
      test-block-iothread: Lock AioContext for blk_insert_bs()
      block: Fix AioContext locking in bdrv_open_child()
      block: Fix AioContext locking in bdrv_attach_child_common()
      block: Fix AioContext locking in bdrv_reopen_parse_file_or_backing()
      block: Fix AioContext locking in bdrv_open_inherit()
      block: Fix AioContext locking in bdrv_open_backing_file()
      blockjob: Fix AioContext locking in block_job_add_bdrv()
      graph-lock: Unlock the AioContext while polling
      Revert "graph-lock: Disable locking for now"

Paolo Bonzini (12):
      file-posix: remove incorrect coroutine_fn calls
      qed: mark more functions as coroutine_fns and GRAPH_RDLOCK
      vpc: mark more functions as coroutine_fns and GRAPH_RDLOCK
      bochs: mark more functions as coroutine_fns and GRAPH_RDLOCK
      block: mark another function as coroutine_fns and GRAPH_UNLOCKED
      cloop: mark more functions as coroutine_fns and GRAPH_RDLOCK
      dmg: mark more functions as coroutine_fns and GRAPH_RDLOCK
      vmdk: mark more functions as coroutine_fns and GRAPH_RDLOCK
      vhdx: mark more functions as coroutine_fns and GRAPH_RDLOCK
      qcow2: mark more functions as coroutine_fns and GRAPH_RDLOCK
      block: use bdrv_co_getlength in coroutine context
      block: use bdrv_co_debug_event in coroutine context

 block/qcow2.h | 33 +++--
 block/vhdx.h | 5 +-
 include/block/block-io.h | 7 ++
 include/block/graph-lock.h | 6 +-
 block.c | 114 ++++++++++++++--
 block/bochs.c | 7 +-
 block/cloop.c | 9 +-
 block/dmg.c | 21 ++--
 block/file-posix.c | 29 +++--
 block/graph-lock.c | 43 +++----
 block/io.c | 14 +--
 block/parallels.c | 4 +-
 block/qcow.c | 30 ++---
 block/qcow2-bitmap.c | 26 ++--
 block/qcow2-cluster.c | 24 ++--
 block/qcow2-refcount.c | 134 +++++++++++----------
 block/qcow2.c | 20 +--
 block/qed-check.c | 5 +-
 block/qed-table.c | 6 +-
 block/qed.c | 15 +--
 block/raw-format.c | 4 +-
 block/vhdx-log.c | 36 +++---
 block/vhdx.c | 73 ++++++-----
 block/vmdk.c | 55 ++++-----
 block/vpc.c | 52 ++++----
 blockjob.c | 17 ++-
 hw/core/qdev-properties-system.c | 8 +-
 tests/unit/test-block-iothread.c | 7 +-
 tests/qemu-iotests/tests/iothreads-commit-active | 85 +++++++++++++
 .../qemu-iotests/tests/iothreads-commit-active.out | 23 ++++
 30 files changed, 573 insertions(+), 339 deletions(-)
 create mode 100755 tests/qemu-iotests/tests/iothreads-commit-active
 create mode 100644 tests/qemu-iotests/tests/iothreads-commit-active.out

This is a better regression test for the bugs hidden by commit 80fc5d26
('graph-lock: Disable locking for now'). With that commit reverted, it
hangs instantaneously and reliably for me.

It is important to have a reliable test like this, because the following
commits will set out to fix the actual root cause of the deadlocks and
then finally revert commit 80fc5d26, which was only a stopgap solution.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20230605085711.21261-2-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 .../tests/iothreads-commit-active     | 85 +++++++++++++++++++
 .../tests/iothreads-commit-active.out | 23 +++++
 2 files changed, 108 insertions(+)
 create mode 100755 tests/qemu-iotests/tests/iothreads-commit-active
 create mode 100644 tests/qemu-iotests/tests/iothreads-commit-active.out

diff --git a/tests/qemu-iotests/tests/iothreads-commit-active b/tests/qemu-iotests/tests/iothreads-commit-active
new file mode 100755
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/qemu-iotests/tests/iothreads-commit-active
@@ -XXX,XX +XXX,XX @@
+#!/usr/bin/env python3
+# group: rw quick auto
+#
+# Copyright (C) 2023 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
...
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Kevin Wolf <kwolf@redhat.com>
+
+import asyncio
+import iotests
+
+iotests.script_initialize(supported_fmts=['qcow2'],
+                          supported_platforms=['linux'])
+iotests.verify_virtio_scsi_pci_or_ccw()
+
+with iotests.FilePath('disk0.img') as img_path, \
+     iotests.FilePath('disk0-snap.img') as snap_path, \
+     iotests.FilePath('mirror-src.img') as src_path, \
+     iotests.FilePath('mirror-dst.img') as dst_path, \
+     iotests.VM() as vm:
+
+    img_size = '10M'
+    iotests.qemu_img_create('-f', iotests.imgfmt, img_path, img_size)
+    iotests.qemu_img_create('-f', iotests.imgfmt, '-b', img_path,
+                            '-F', iotests.imgfmt, snap_path)
+    iotests.qemu_img_create('-f', iotests.imgfmt, src_path, img_size)
+    iotests.qemu_img_create('-f', iotests.imgfmt, dst_path, img_size)
+
+    iotests.qemu_io_log('-c', 'write 0 64k', img_path)
+    iotests.qemu_io_log('-c', 'write 1M 64k', snap_path)
+    iotests.qemu_io_log('-c', 'write 3M 64k', snap_path)
+
+    iotests.qemu_io_log('-c', f'write 0 {img_size}', src_path)
+
+    iotests.log('Launching VM...')
+    vm.add_object('iothread,id=iothread0')
+    vm.add_object('throttle-group,x-bps-write=1048576,id=tg0')
+    vm.add_blockdev(f'file,node-name=disk0-file,filename={img_path}')
+    vm.add_blockdev('qcow2,node-name=disk0-fmt,file=disk0-file')
+    vm.add_drive(snap_path, 'backing=disk0-fmt,node-name=disk0',
+                 interface='none')
+    vm.add_device('virtio-scsi,iothread=iothread0')
+    vm.add_device('scsi-hd,drive=drive0')
+
+    vm.add_blockdev(f'file,filename={src_path},node-name=mirror-src-file')
+    vm.add_blockdev('qcow2,file=mirror-src-file,node-name=mirror-src')
+    vm.add_blockdev(f'file,filename={dst_path},node-name=mirror-dst-file')
+    vm.add_blockdev('qcow2,file=mirror-dst-file,node-name=mirror-dst-fmt')
+    vm.add_blockdev('throttle,throttle-group=tg0,file=mirror-dst-fmt,'
+                    'node-name=mirror-dst')
+    vm.add_device('scsi-hd,drive=mirror-src')
+
+    vm.launch()
+
+    # The background I/O is created on unrelated nodes (so that they won't be
+    # drained together with the other ones), but on the same iothread
+    iotests.log('Creating some background I/O...')
+    iotests.log(vm.qmp('blockdev-mirror', job_id='job0', sync='full',
+                       device='mirror-src', target='mirror-dst',
+                       auto_dismiss=False))
+
+    iotests.log('Starting active commit...')
+    iotests.log(vm.qmp('block-commit', device='disk0', job_id='job1',
+                       auto_dismiss=False))
+
+    # Should succeed and not time out
+    try:
+        vm.run_job('job1', wait=5.0)
+        vm.shutdown()
+    except asyncio.TimeoutError:
+        # VM may be stuck, kill it
+        vm.kill()
+        raise
diff --git a/tests/qemu-iotests/tests/iothreads-commit-active.out b/tests/qemu-iotests/tests/iothreads-commit-active.out
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/qemu-iotests/tests/iothreads-commit-active.out
@@ -XXX,XX +XXX,XX @@
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+wrote 65536/65536 bytes at offset 1048576
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+wrote 65536/65536 bytes at offset 3145728
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+wrote 10485760/10485760 bytes at offset 0
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+Launching VM...
+Creating some background I/O...
+{"return": {}}
+Starting active commit...
+{"return": {}}
+{"execute": "job-complete", "arguments": {"id": "job1"}}
+{"return": {}}
+{"data": {"device": "job1", "len": 131072, "offset": 131072, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_READY", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"data": {"device": "job1", "len": 131072, "offset": 131072, "speed": 0, "type": "commit"}, "event": "BLOCK_JOB_COMPLETED", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}}
+{"execute": "job-dismiss", "arguments": {"id": "job1"}}
+{"return": {}}
--
2.41.0
1 | blk_insert_bs() requires that callers hold the AioContext lock for the | ||
2 | node that should be inserted. Take it. | ||
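For illustration only (not part of this patch; the helper name is made up and
the usual QEMU block-layer headers are assumed), the caller-side pattern that
blk_insert_bs() expects looks roughly like this:

    /* Sketch: hold the AioContext lock of @bs across blk_insert_bs() */
    static int attach_node_to_backend(BlockBackend *blk, BlockDriverState *bs,
                                      Error **errp)
    {
        AioContext *ctx = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(ctx);
        ret = blk_insert_bs(blk, bs, errp);
        aio_context_release(ctx);

        return ret;
    }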
1 | 3 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Message-ID: <20230605085711.21261-3-kwolf@redhat.com> | ||
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
8 | --- | ||
9 | hw/core/qdev-properties-system.c | 8 ++++++-- | ||
10 | 1 file changed, 6 insertions(+), 2 deletions(-) | ||
11 | |||
12 | diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/hw/core/qdev-properties-system.c | ||
15 | +++ b/hw/core/qdev-properties-system.c | ||
16 | @@ -XXX,XX +XXX,XX @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, | ||
17 | * aware of iothreads require their BlockBackends to be in the main | ||
18 | * AioContext. | ||
19 | */ | ||
20 | - ctx = iothread ? bdrv_get_aio_context(bs) : qemu_get_aio_context(); | ||
21 | - blk = blk_new(ctx, 0, BLK_PERM_ALL); | ||
22 | + ctx = bdrv_get_aio_context(bs); | ||
23 | + blk = blk_new(iothread ? ctx : qemu_get_aio_context(), | ||
24 | + 0, BLK_PERM_ALL); | ||
25 | blk_created = true; | ||
26 | |||
27 | + aio_context_acquire(ctx); | ||
28 | ret = blk_insert_bs(blk, bs, errp); | ||
29 | + aio_context_release(ctx); | ||
30 | + | ||
31 | if (ret < 0) { | ||
32 | goto fail; | ||
33 | } | ||
34 | -- | ||
35 | 2.41.0
1 | blk_insert_bs() requires that callers hold the AioContext lock for the | ||
2 | node that should be inserted. Take it. | ||
1 | 3 | ||
4 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
5 | Message-ID: <20230605085711.21261-4-kwolf@redhat.com> | ||
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
8 | --- | ||
9 | tests/unit/test-block-iothread.c | 7 ++++++- | ||
10 | 1 file changed, 6 insertions(+), 1 deletion(-) | ||
11 | |||
12 | diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/tests/unit/test-block-iothread.c | ||
15 | +++ b/tests/unit/test-block-iothread.c | ||
16 | @@ -XXX,XX +XXX,XX @@ static void test_attach_second_node(void) | ||
17 | BlockDriverState *bs, *filter; | ||
18 | QDict *options; | ||
19 | |||
20 | + aio_context_acquire(main_ctx); | ||
21 | blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); | ||
22 | bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); | ||
23 | blk_insert_bs(blk, bs, &error_abort); | ||
24 | @@ -XXX,XX +XXX,XX @@ static void test_attach_second_node(void) | ||
25 | qdict_put_str(options, "driver", "raw"); | ||
26 | qdict_put_str(options, "file", "base"); | ||
27 | |||
28 | - aio_context_acquire(main_ctx); | ||
29 | filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort); | ||
30 | aio_context_release(main_ctx); | ||
31 | |||
32 | @@ -XXX,XX +XXX,XX @@ static void test_attach_preserve_blk_ctx(void) | ||
33 | { | ||
34 | IOThread *iothread = iothread_new(); | ||
35 | AioContext *ctx = iothread_get_aio_context(iothread); | ||
36 | + AioContext *main_ctx = qemu_get_aio_context(); | ||
37 | BlockBackend *blk; | ||
38 | BlockDriverState *bs; | ||
39 | |||
40 | + aio_context_acquire(main_ctx); | ||
41 | blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); | ||
42 | bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); | ||
43 | bs->total_sectors = 65536 / BDRV_SECTOR_SIZE; | ||
44 | @@ -XXX,XX +XXX,XX @@ static void test_attach_preserve_blk_ctx(void) | ||
45 | blk_insert_bs(blk, bs, &error_abort); | ||
46 | g_assert(blk_get_aio_context(blk) == ctx); | ||
47 | g_assert(bdrv_get_aio_context(bs) == ctx); | ||
48 | + aio_context_release(main_ctx); | ||
49 | |||
50 | /* Remove the node again */ | ||
51 | aio_context_acquire(ctx); | ||
52 | @@ -XXX,XX +XXX,XX @@ static void test_attach_preserve_blk_ctx(void) | ||
53 | g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context()); | ||
54 | |||
55 | /* Re-attach the node */ | ||
56 | + aio_context_acquire(main_ctx); | ||
57 | blk_insert_bs(blk, bs, &error_abort); | ||
58 | + aio_context_release(main_ctx); | ||
59 | g_assert(blk_get_aio_context(blk) == ctx); | ||
60 | g_assert(bdrv_get_aio_context(bs) == ctx); | ||
61 | |||
62 | -- | ||
63 | 2.41.0
1 | bdrv_attach_child() requires that the caller holds the AioContext lock | ||
2 | for the new child node. Take it in bdrv_open_child() and document that | ||
3 | the caller must not hold any AioContext apart from the main AioContext. | ||
1 | 4 | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | Message-ID: <20230605085711.21261-5-kwolf@redhat.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | --- | ||
10 | block.c | 13 +++++++++++-- | ||
11 | 1 file changed, 11 insertions(+), 2 deletions(-) | ||
12 | |||
13 | diff --git a/block.c b/block.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/block.c | ||
16 | +++ b/block.c | ||
17 | @@ -XXX,XX +XXX,XX @@ done: | ||
18 | * | ||
19 | * The BlockdevRef will be removed from the options QDict. | ||
20 | * | ||
21 | + * The caller must hold the lock of the main AioContext and no other AioContext. | ||
22 | * @parent can move to a different AioContext in this function. Callers must | ||
23 | * make sure that their AioContext locking is still correct after this. | ||
24 | */ | ||
25 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_open_child(const char *filename, | ||
26 | bool allow_none, Error **errp) | ||
27 | { | ||
28 | BlockDriverState *bs; | ||
29 | + BdrvChild *child; | ||
30 | + AioContext *ctx; | ||
31 | |||
32 | GLOBAL_STATE_CODE(); | ||
33 | |||
34 | @@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_open_child(const char *filename, | ||
35 | return NULL; | ||
36 | } | ||
37 | |||
38 | - return bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, | ||
39 | - errp); | ||
40 | + ctx = bdrv_get_aio_context(bs); | ||
41 | + aio_context_acquire(ctx); | ||
42 | + child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, | ||
43 | + errp); | ||
44 | + aio_context_release(ctx); | ||
45 | + | ||
46 | + return child; | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Wrapper on bdrv_open_child() for most popular case: open primary child of bs. | ||
51 | * | ||
52 | + * The caller must hold the lock of the main AioContext and no other AioContext. | ||
53 | * @parent can move to a different AioContext in this function. Callers must | ||
54 | * make sure that their AioContext locking is still correct after this. | ||
55 | */ | ||
56 | -- | ||
57 | 2.41.0
1 | The function can move the child node to a different AioContext. In this | ||
2 | case, it also must take the AioContext lock for the new context before | ||
3 | calling functions that require the caller to hold the AioContext for the | ||
4 | child node. | ||
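For illustration only (not part of this patch; the function name is made up
and the elided operations are placeholders), the re-locking dance for a node
that may have moved to another AioContext looks roughly like this:

    /* Sketch: re-take the lock of the AioContext the node actually lives in */
    static int reparent_and_use(BlockDriverState *child_bs, Error **errp)
    {
        AioContext *old_ctx = bdrv_get_aio_context(child_bs);
        AioContext *new_ctx;

        /* ... an operation that may move child_bs to another AioContext ... */

        new_ctx = bdrv_get_aio_context(child_bs);
        if (new_ctx != old_ctx) {
            aio_context_release(old_ctx);
            aio_context_acquire(new_ctx);
        }

        /* ... calls that need the lock of child_bs's current AioContext ... */

        if (new_ctx != old_ctx) {
            aio_context_release(new_ctx);
            aio_context_acquire(old_ctx);
        }
        return 0;
    }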
1 | 5 | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
7 | Message-ID: <20230605085711.21261-6-kwolf@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | block.c | 21 ++++++++++++++++++++- | ||
12 | 1 file changed, 20 insertions(+), 1 deletion(-) | ||
13 | |||
14 | diff --git a/block.c b/block.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block.c | ||
17 | +++ b/block.c | ||
18 | @@ -XXX,XX +XXX,XX @@ static TransactionActionDrv bdrv_attach_child_common_drv = { | ||
19 | * Function doesn't update permissions, caller is responsible for this. | ||
20 | * | ||
21 | * Returns new created child. | ||
22 | + * | ||
23 | + * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and | ||
24 | + * @child_bs can move to a different AioContext in this function. Callers must | ||
25 | + * make sure that their AioContext locking is still correct after this. | ||
26 | */ | ||
27 | static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs, | ||
28 | const char *child_name, | ||
29 | @@ -XXX,XX +XXX,XX @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs, | ||
30 | Transaction *tran, Error **errp) | ||
31 | { | ||
32 | BdrvChild *new_child; | ||
33 | - AioContext *parent_ctx; | ||
34 | + AioContext *parent_ctx, *new_child_ctx; | ||
35 | AioContext *child_ctx = bdrv_get_aio_context(child_bs); | ||
36 | |||
37 | assert(child_class->get_parent_desc); | ||
38 | @@ -XXX,XX +XXX,XX @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs, | ||
39 | } | ||
40 | } | ||
41 | |||
42 | + new_child_ctx = bdrv_get_aio_context(child_bs); | ||
43 | + if (new_child_ctx != child_ctx) { | ||
44 | + aio_context_release(child_ctx); | ||
45 | + aio_context_acquire(new_child_ctx); | ||
46 | + } | ||
47 | + | ||
48 | bdrv_ref(child_bs); | ||
49 | /* | ||
50 | * Let every new BdrvChild start with a drained parent. Inserting the child | ||
51 | @@ -XXX,XX +XXX,XX @@ static BdrvChild *bdrv_attach_child_common(BlockDriverState *child_bs, | ||
52 | }; | ||
53 | tran_add(tran, &bdrv_attach_child_common_drv, s); | ||
54 | |||
55 | + if (new_child_ctx != child_ctx) { | ||
56 | + aio_context_release(new_child_ctx); | ||
57 | + aio_context_acquire(child_ctx); | ||
58 | + } | ||
59 | + | ||
60 | return new_child; | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Function doesn't update permissions, caller is responsible for this. | ||
65 | + * | ||
66 | + * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and | ||
67 | + * @child_bs can move to a different AioContext in this function. Callers must | ||
68 | + * make sure that their AioContext locking is still correct after this. | ||
69 | */ | ||
70 | static BdrvChild *bdrv_attach_child_noperm(BlockDriverState *parent_bs, | ||
71 | BlockDriverState *child_bs, | ||
72 | -- | ||
73 | 2.41.0
1 | bdrv_set_file_or_backing_noperm() requires the caller to hold the | ||
2 | AioContext lock for the child node, but we hold the one for the parent | ||
3 | node in bdrv_reopen_parse_file_or_backing(). Take the other one | ||
4 | temporarily. | ||
1 | 5 | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
7 | Message-ID: <20230605085711.21261-7-kwolf@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | block.c | 35 +++++++++++++++++++++++++++++++++-- | ||
12 | 1 file changed, 33 insertions(+), 2 deletions(-) | ||
13 | |||
14 | diff --git a/block.c b/block.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block.c | ||
17 | +++ b/block.c | ||
18 | @@ -XXX,XX +XXX,XX @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs) | ||
19 | * callers which don't need their own reference any more must call bdrv_unref(). | ||
20 | * | ||
21 | * Function doesn't update permissions, caller is responsible for this. | ||
22 | + * | ||
23 | + * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and | ||
24 | + * @child_bs can move to a different AioContext in this function. Callers must | ||
25 | + * make sure that their AioContext locking is still correct after this. | ||
26 | */ | ||
27 | static int bdrv_set_file_or_backing_noperm(BlockDriverState *parent_bs, | ||
28 | BlockDriverState *child_bs, | ||
29 | @@ -XXX,XX +XXX,XX @@ out: | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | +/* | ||
34 | + * The caller must hold the AioContext lock for @backing_hd. Both @bs and | ||
35 | + * @backing_hd can move to a different AioContext in this function. Callers must | ||
36 | + * make sure that their AioContext locking is still correct after this. | ||
37 | + */ | ||
38 | static int bdrv_set_backing_noperm(BlockDriverState *bs, | ||
39 | BlockDriverState *backing_hd, | ||
40 | Transaction *tran, Error **errp) | ||
41 | @@ -XXX,XX +XXX,XX @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, | ||
42 | * backing BlockDriverState (or NULL). | ||
43 | * | ||
44 | * Return 0 on success, otherwise return < 0 and set @errp. | ||
45 | + * | ||
46 | + * The caller must hold the AioContext lock of @reopen_state->bs. | ||
47 | + * @reopen_state->bs can move to a different AioContext in this function. | ||
48 | + * Callers must make sure that their AioContext locking is still correct after | ||
49 | + * this. | ||
50 | */ | ||
51 | static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, | ||
52 | bool is_backing, Transaction *tran, | ||
53 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, | ||
54 | const char *child_name = is_backing ? "backing" : "file"; | ||
55 | QObject *value; | ||
56 | const char *str; | ||
57 | + AioContext *ctx, *old_ctx; | ||
58 | + int ret; | ||
59 | |||
60 | GLOBAL_STATE_CODE(); | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, | ||
63 | reopen_state->old_file_bs = old_child_bs; | ||
64 | } | ||
65 | |||
66 | - return bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing, | ||
67 | - tran, errp); | ||
68 | + old_ctx = bdrv_get_aio_context(bs); | ||
69 | + ctx = bdrv_get_aio_context(new_child_bs); | ||
70 | + if (old_ctx != ctx) { | ||
71 | + aio_context_release(old_ctx); | ||
72 | + aio_context_acquire(ctx); | ||
73 | + } | ||
74 | + | ||
75 | + ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing, | ||
76 | + tran, errp); | ||
77 | + | ||
78 | + if (old_ctx != ctx) { | ||
79 | + aio_context_release(ctx); | ||
80 | + aio_context_acquire(old_ctx); | ||
81 | + } | ||
82 | + | ||
83 | + return ret; | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | @@ -XXX,XX +XXX,XX @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, | ||
88 | * It is the responsibility of the caller to then call the abort() or | ||
89 | * commit() for any other BDS that have been left in a prepare() state | ||
90 | * | ||
91 | + * The caller must hold the AioContext lock of @reopen_state->bs. | ||
92 | */ | ||
93 | static int bdrv_reopen_prepare(BDRVReopenState *reopen_state, | ||
94 | BlockReopenQueue *queue, | ||
95 | -- | ||
96 | 2.41.0
1 | bdrv_open_inherit() calls several functions for which it needs to hold | ||
2 | the AioContext lock, but currently doesn't. This includes calls in | ||
3 | bdrv_append_temp_snapshot(), for which bdrv_open_inherit() is the only | ||
4 | caller. Fix the locking in these places. | ||
1 | 5 | ||
6 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
7 | Message-ID: <20230605085711.21261-8-kwolf@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | --- | ||
11 | block.c | 25 ++++++++++++++++++++++++- | ||
12 | 1 file changed, 24 insertions(+), 1 deletion(-) | ||
13 | |||
14 | diff --git a/block.c b/block.c | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/block.c | ||
17 | +++ b/block.c | ||
18 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, | ||
19 | int64_t total_size; | ||
20 | QemuOpts *opts = NULL; | ||
21 | BlockDriverState *bs_snapshot = NULL; | ||
22 | + AioContext *ctx = bdrv_get_aio_context(bs); | ||
23 | int ret; | ||
24 | |||
25 | GLOBAL_STATE_CODE(); | ||
26 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, | ||
27 | instead of opening 'filename' directly */ | ||
28 | |||
29 | /* Get the required size from the image */ | ||
30 | + aio_context_acquire(ctx); | ||
31 | total_size = bdrv_getlength(bs); | ||
32 | + aio_context_release(ctx); | ||
33 | + | ||
34 | if (total_size < 0) { | ||
35 | error_setg_errno(errp, -total_size, "Could not get image size"); | ||
36 | goto out; | ||
37 | @@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, | ||
38 | goto out; | ||
39 | } | ||
40 | |||
41 | + aio_context_acquire(ctx); | ||
42 | ret = bdrv_append(bs_snapshot, bs, errp); | ||
43 | + aio_context_release(ctx); | ||
44 | + | ||
45 | if (ret < 0) { | ||
46 | bs_snapshot = NULL; | ||
47 | goto out; | ||
48 | @@ -XXX,XX +XXX,XX @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, | ||
49 | Error *local_err = NULL; | ||
50 | QDict *snapshot_options = NULL; | ||
51 | int snapshot_flags = 0; | ||
52 | + AioContext *ctx = qemu_get_aio_context(); | ||
53 | |||
54 | assert(!child_class || !flags); | ||
55 | assert(!child_class == !parent); | ||
56 | @@ -XXX,XX +XXX,XX @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, | ||
57 | /* Not requesting BLK_PERM_CONSISTENT_READ because we're only | ||
58 | * looking at the header to guess the image format. This works even | ||
59 | * in cases where a guest would not see a consistent state. */ | ||
60 | - file = blk_new(bdrv_get_aio_context(file_bs), 0, BLK_PERM_ALL); | ||
61 | + ctx = bdrv_get_aio_context(file_bs); | ||
62 | + aio_context_acquire(ctx); | ||
63 | + file = blk_new(ctx, 0, BLK_PERM_ALL); | ||
64 | blk_insert_bs(file, file_bs, &local_err); | ||
65 | bdrv_unref(file_bs); | ||
66 | + aio_context_release(ctx); | ||
67 | + | ||
68 | if (local_err) { | ||
69 | goto fail; | ||
70 | } | ||
71 | @@ -XXX,XX +XXX,XX @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, | ||
72 | goto fail; | ||
73 | } | ||
74 | |||
75 | + /* The AioContext could have changed during bdrv_open_common() */ | ||
76 | + ctx = bdrv_get_aio_context(bs); | ||
77 | + | ||
78 | if (file) { | ||
79 | + aio_context_acquire(ctx); | ||
80 | blk_unref(file); | ||
81 | + aio_context_release(ctx); | ||
82 | file = NULL; | ||
83 | } | ||
84 | |||
85 | @@ -XXX,XX +XXX,XX @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, | ||
86 | * (snapshot_bs); thus, we have to drop the strong reference to bs | ||
87 | * (which we obtained by calling bdrv_new()). bs will not be deleted, | ||
88 | * though, because the overlay still has a reference to it. */ | ||
89 | + aio_context_acquire(ctx); | ||
90 | bdrv_unref(bs); | ||
91 | + aio_context_release(ctx); | ||
92 | bs = snapshot_bs; | ||
93 | } | ||
94 | |||
95 | return bs; | ||
96 | |||
97 | fail: | ||
98 | + aio_context_acquire(ctx); | ||
99 | blk_unref(file); | ||
100 | qobject_unref(snapshot_options); | ||
101 | qobject_unref(bs->explicit_options); | ||
102 | @@ -XXX,XX +XXX,XX @@ fail: | ||
103 | bs->options = NULL; | ||
104 | bs->explicit_options = NULL; | ||
105 | bdrv_unref(bs); | ||
106 | + aio_context_release(ctx); | ||
107 | error_propagate(errp, local_err); | ||
108 | return NULL; | ||
109 | |||
110 | close_and_fail: | ||
111 | + aio_context_acquire(ctx); | ||
112 | bdrv_unref(bs); | ||
113 | + aio_context_release(ctx); | ||
114 | qobject_unref(snapshot_options); | ||
115 | qobject_unref(options); | ||
116 | error_propagate(errp, local_err); | ||
117 | -- | ||
118 | 2.41.0
1 | bdrv_set_backing() requires the caller to hold the AioContext lock for | ||
2 | @backing_hd. Take it in bdrv_open_backing_file() before calling the | ||
3 | function. | ||
1 | 4 | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | Message-ID: <20230605085711.21261-9-kwolf@redhat.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | --- | ||
10 | block.c | 5 +++++ | ||
11 | 1 file changed, 5 insertions(+) | ||
12 | |||
13 | diff --git a/block.c b/block.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/block.c | ||
16 | +++ b/block.c | ||
17 | @@ -XXX,XX +XXX,XX @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, | ||
18 | int ret = 0; | ||
19 | bool implicit_backing = false; | ||
20 | BlockDriverState *backing_hd; | ||
21 | + AioContext *backing_hd_ctx; | ||
22 | QDict *options; | ||
23 | QDict *tmp_parent_options = NULL; | ||
24 | Error *local_err = NULL; | ||
25 | @@ -XXX,XX +XXX,XX @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, | ||
26 | |||
27 | /* Hook up the backing file link; drop our reference, bs owns the | ||
28 | * backing_hd reference now */ | ||
29 | + backing_hd_ctx = bdrv_get_aio_context(backing_hd); | ||
30 | + aio_context_acquire(backing_hd_ctx); | ||
31 | ret = bdrv_set_backing_hd(bs, backing_hd, errp); | ||
32 | bdrv_unref(backing_hd); | ||
33 | + aio_context_release(backing_hd_ctx); | ||
34 | + | ||
35 | if (ret < 0) { | ||
36 | goto free_exit; | ||
37 | } | ||
38 | -- | ||
39 | 2.41.0
1 | bdrv_root_attach_child() requires callers to hold the AioContext lock | ||
2 | for child_bs. Take it in block_job_add_bdrv() before calling the | ||
3 | function. | ||
1 | 4 | ||
5 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | Message-ID: <20230605085711.21261-10-kwolf@redhat.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | --- | ||
10 | blockjob.c | 17 ++++++++++++----- | ||
11 | 1 file changed, 12 insertions(+), 5 deletions(-) | ||
12 | |||
13 | diff --git a/blockjob.c b/blockjob.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/blockjob.c | ||
16 | +++ b/blockjob.c | ||
17 | @@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, | ||
18 | uint64_t perm, uint64_t shared_perm, Error **errp) | ||
19 | { | ||
20 | BdrvChild *c; | ||
21 | + AioContext *ctx = bdrv_get_aio_context(bs); | ||
22 | bool need_context_ops; | ||
23 | GLOBAL_STATE_CODE(); | ||
24 | |||
25 | bdrv_ref(bs); | ||
26 | |||
27 | - need_context_ops = bdrv_get_aio_context(bs) != job->job.aio_context; | ||
28 | + need_context_ops = ctx != job->job.aio_context; | ||
29 | |||
30 | - if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) { | ||
31 | - aio_context_release(job->job.aio_context); | ||
32 | + if (need_context_ops) { | ||
33 | + if (job->job.aio_context != qemu_get_aio_context()) { | ||
34 | + aio_context_release(job->job.aio_context); | ||
35 | + } | ||
36 | + aio_context_acquire(ctx); | ||
37 | } | ||
38 | c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job, | ||
39 | errp); | ||
40 | - if (need_context_ops && job->job.aio_context != qemu_get_aio_context()) { | ||
41 | - aio_context_acquire(job->job.aio_context); | ||
42 | + if (need_context_ops) { | ||
43 | + aio_context_release(ctx); | ||
44 | + if (job->job.aio_context != qemu_get_aio_context()) { | ||
45 | + aio_context_acquire(job->job.aio_context); | ||
46 | + } | ||
47 | } | ||
48 | if (c == NULL) { | ||
49 | return -EPERM; | ||
50 | -- | ||
51 | 2.41.0
1 | If the caller keeps the AioContext lock for a block node in an iothread, | ||
2 | polling in bdrv_graph_wrlock() deadlocks if the condition isn't | ||
3 | fulfilled immediately. | ||
1 | 4 | ||
5 | Now that all callers make sure to actually have the AioContext locked | ||
6 | when they call bdrv_replace_child_noperm() like they should, we can | ||
7 | change bdrv_graph_wrlock() to take a BlockDriverState whose AioContext | ||
8 | lock the caller holds (NULL if it doesn't) and unlock it temporarily | ||
9 | while polling. | ||
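For illustration only (not part of this patch; the surrounding code is a
placeholder), a caller is expected to use the changed interface roughly like
this:

    /* Sketch: @bs is the node whose AioContext lock this caller holds */
    bdrv_graph_wrlock(bs);    /* temporarily drops bs's AioContext lock while polling */
    /* ... modify the block graph ... */
    bdrv_graph_wrunlock();

    /* a caller that holds no AioContext lock besides the main one passes NULL */
    bdrv_graph_wrlock(NULL);
    /* ... */
    bdrv_graph_wrunlock();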
10 | |||
11 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
12 | Message-ID: <20230605085711.21261-11-kwolf@redhat.com> | ||
13 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
15 | --- | ||
16 | include/block/graph-lock.h | 6 ++++-- | ||
17 | block.c | 4 ++-- | ||
18 | block/graph-lock.c | 23 ++++++++++++++++++++++- | ||
19 | 3 files changed, 28 insertions(+), 5 deletions(-) | ||
20 | |||
21 | diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/include/block/graph-lock.h | ||
24 | +++ b/include/block/graph-lock.h | ||
25 | @@ -XXX,XX +XXX,XX @@ void unregister_aiocontext(AioContext *ctx); | ||
26 | * The wrlock can only be taken from the main loop, with BQL held, as only the | ||
27 | * main loop is allowed to modify the graph. | ||
28 | * | ||
29 | + * If @bs is non-NULL, its AioContext is temporarily released. | ||
30 | + * | ||
31 | * This function polls. Callers must not hold the lock of any AioContext other | ||
32 | - * than the current one. | ||
33 | + * than the current one and the one of @bs. | ||
34 | */ | ||
35 | -void bdrv_graph_wrlock(void) TSA_ACQUIRE(graph_lock) TSA_NO_TSA; | ||
36 | +void bdrv_graph_wrlock(BlockDriverState *bs) TSA_ACQUIRE(graph_lock) TSA_NO_TSA; | ||
37 | |||
38 | /* | ||
39 | * bdrv_graph_wrunlock: | ||
40 | diff --git a/block.c b/block.c | ||
41 | index XXXXXXX..XXXXXXX 100644 | ||
42 | --- a/block.c | ||
43 | +++ b/block.c | ||
44 | @@ -XXX,XX +XXX,XX @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm) | ||
45 | * Replaces the node that a BdrvChild points to without updating permissions. | ||
46 | * | ||
47 | * If @new_bs is non-NULL, the parent of @child must already be drained through | ||
48 | - * @child. | ||
49 | + * @child and the caller must hold the AioContext lock for @new_bs. | ||
50 | */ | ||
51 | static void bdrv_replace_child_noperm(BdrvChild *child, | ||
52 | BlockDriverState *new_bs) | ||
53 | @@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child_noperm(BdrvChild *child, | ||
54 | } | ||
55 | |||
56 | /* TODO Pull this up into the callers to avoid polling here */ | ||
57 | - bdrv_graph_wrlock(); | ||
58 | + bdrv_graph_wrlock(new_bs); | ||
59 | if (old_bs) { | ||
60 | if (child->klass->detach) { | ||
61 | child->klass->detach(child); | ||
62 | diff --git a/block/graph-lock.c b/block/graph-lock.c | ||
63 | index XXXXXXX..XXXXXXX 100644 | ||
64 | --- a/block/graph-lock.c | ||
65 | +++ b/block/graph-lock.c | ||
66 | @@ -XXX,XX +XXX,XX @@ static uint32_t reader_count(void) | ||
67 | } | ||
68 | #endif | ||
69 | |||
70 | -void bdrv_graph_wrlock(void) | ||
71 | +void bdrv_graph_wrlock(BlockDriverState *bs) | ||
72 | { | ||
73 | + AioContext *ctx = NULL; | ||
74 | + | ||
75 | GLOBAL_STATE_CODE(); | ||
76 | /* | ||
77 | * TODO Some callers hold an AioContext lock when this is called, which | ||
78 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrlock(void) | ||
79 | */ | ||
80 | #if 0 | ||
81 | assert(!qatomic_read(&has_writer)); | ||
82 | +#endif | ||
83 | + | ||
84 | + /* | ||
85 | + * Release only non-mainloop AioContext. The mainloop often relies on the | ||
86 | + * BQL and doesn't lock the main AioContext before doing things. | ||
87 | + */ | ||
88 | + if (bs) { | ||
89 | + ctx = bdrv_get_aio_context(bs); | ||
90 | + if (ctx != qemu_get_aio_context()) { | ||
91 | + aio_context_release(ctx); | ||
92 | + } else { | ||
93 | + ctx = NULL; | ||
94 | + } | ||
95 | + } | ||
96 | |||
97 | +#if 0 | ||
98 | /* Make sure that constantly arriving new I/O doesn't cause starvation */ | ||
99 | bdrv_drain_all_begin_nopoll(); | ||
100 | |||
101 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrlock(void) | ||
102 | |||
103 | bdrv_drain_all_end(); | ||
104 | #endif | ||
105 | + | ||
106 | + if (ctx) { | ||
107 | + aio_context_acquire(bdrv_get_aio_context(bs)); | ||
108 | + } | ||
109 | } | ||
110 | |||
111 | void bdrv_graph_wrunlock(void) | ||
112 | -- | ||
113 | 2.41.0
1 | Now that bdrv_graph_wrlock() temporarily drops the AioContext lock that | ||
2 | its caller holds, it can poll without causing deadlocks. We can now | ||
3 | re-enable graph locking. | ||
1 | 4 | ||
5 | This reverts commit ad128dff0bf4b6f971d05eb4335a627883a19c1d. | ||
6 | |||
7 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
8 | Message-ID: <20230605085711.21261-12-kwolf@redhat.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | --- | ||
12 | block/graph-lock.c | 26 -------------------------- | ||
13 | 1 file changed, 26 deletions(-) | ||
14 | |||
15 | diff --git a/block/graph-lock.c b/block/graph-lock.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/block/graph-lock.c | ||
18 | +++ b/block/graph-lock.c | ||
19 | @@ -XXX,XX +XXX,XX @@ BdrvGraphLock graph_lock; | ||
20 | /* Protects the list of aiocontext and orphaned_reader_count */ | ||
21 | static QemuMutex aio_context_list_lock; | ||
22 | |||
23 | -#if 0 | ||
24 | /* Written and read with atomic operations. */ | ||
25 | static int has_writer; | ||
26 | -#endif | ||
27 | |||
28 | /* | ||
29 | * A reader coroutine could move from an AioContext to another. | ||
30 | @@ -XXX,XX +XXX,XX @@ void unregister_aiocontext(AioContext *ctx) | ||
31 | g_free(ctx->bdrv_graph); | ||
32 | } | ||
33 | |||
34 | -#if 0 | ||
35 | static uint32_t reader_count(void) | ||
36 | { | ||
37 | BdrvGraphRWlock *brdv_graph; | ||
38 | @@ -XXX,XX +XXX,XX @@ static uint32_t reader_count(void) | ||
39 | assert((int32_t)rd >= 0); | ||
40 | return rd; | ||
41 | } | ||
42 | -#endif | ||
43 | |||
44 | void bdrv_graph_wrlock(BlockDriverState *bs) | ||
45 | { | ||
46 | AioContext *ctx = NULL; | ||
47 | |||
48 | GLOBAL_STATE_CODE(); | ||
49 | - /* | ||
50 | - * TODO Some callers hold an AioContext lock when this is called, which | ||
51 | - * causes deadlocks. Reenable once the AioContext locking is cleaned up (or | ||
52 | - * AioContext locks are gone). | ||
53 | - */ | ||
54 | -#if 0 | ||
55 | assert(!qatomic_read(&has_writer)); | ||
56 | -#endif | ||
57 | |||
58 | /* | ||
59 | * Release only non-mainloop AioContext. The mainloop often relies on the | ||
60 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrlock(BlockDriverState *bs) | ||
61 | } | ||
62 | } | ||
63 | |||
64 | -#if 0 | ||
65 | /* Make sure that constantly arriving new I/O doesn't cause starvation */ | ||
66 | bdrv_drain_all_begin_nopoll(); | ||
67 | |||
68 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrlock(BlockDriverState *bs) | ||
69 | } while (reader_count() >= 1); | ||
70 | |||
71 | bdrv_drain_all_end(); | ||
72 | -#endif | ||
73 | |||
74 | if (ctx) { | ||
75 | aio_context_acquire(bdrv_get_aio_context(bs)); | ||
76 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrlock(BlockDriverState *bs) | ||
77 | void bdrv_graph_wrunlock(void) | ||
78 | { | ||
79 | GLOBAL_STATE_CODE(); | ||
80 | -#if 0 | ||
81 | QEMU_LOCK_GUARD(&aio_context_list_lock); | ||
82 | assert(qatomic_read(&has_writer)); | ||
83 | |||
84 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_wrunlock(void) | ||
85 | |||
86 | /* Wake up all coroutine that are waiting to read the graph */ | ||
87 | qemu_co_enter_all(&reader_queue, &aio_context_list_lock); | ||
88 | -#endif | ||
89 | } | ||
90 | |||
91 | void coroutine_fn bdrv_graph_co_rdlock(void) | ||
92 | { | ||
93 | - /* TODO Reenable when wrlock is reenabled */ | ||
94 | -#if 0 | ||
95 | BdrvGraphRWlock *bdrv_graph; | ||
96 | bdrv_graph = qemu_get_current_aio_context()->bdrv_graph; | ||
97 | |||
98 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn bdrv_graph_co_rdlock(void) | ||
99 | qemu_co_queue_wait(&reader_queue, &aio_context_list_lock); | ||
100 | } | ||
101 | } | ||
102 | -#endif | ||
103 | } | ||
104 | |||
105 | void coroutine_fn bdrv_graph_co_rdunlock(void) | ||
106 | { | ||
107 | -#if 0 | ||
108 | BdrvGraphRWlock *bdrv_graph; | ||
109 | bdrv_graph = qemu_get_current_aio_context()->bdrv_graph; | ||
110 | |||
111 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn bdrv_graph_co_rdunlock(void) | ||
112 | if (qatomic_read(&has_writer)) { | ||
113 | aio_wait_kick(); | ||
114 | } | ||
115 | -#endif | ||
116 | } | ||
117 | |||
118 | void bdrv_graph_rdlock_main_loop(void) | ||
119 | @@ -XXX,XX +XXX,XX @@ void bdrv_graph_rdunlock_main_loop(void) | ||
120 | void assert_bdrv_graph_readable(void) | ||
121 | { | ||
122 | /* reader_count() is slow due to aio_context_list_lock lock contention */ | ||
123 | - /* TODO Reenable when wrlock is reenabled */ | ||
124 | -#if 0 | ||
125 | #ifdef CONFIG_DEBUG_GRAPH_LOCK | ||
126 | assert(qemu_in_main_thread() || reader_count()); | ||
127 | #endif | ||
128 | -#endif | ||
129 | } | ||
130 | |||
131 | void assert_bdrv_graph_writable(void) | ||
132 | { | ||
133 | assert(qemu_in_main_thread()); | ||
134 | - /* TODO Reenable when wrlock is reenabled */ | ||
135 | -#if 0 | ||
136 | assert(qatomic_read(&has_writer)); | ||
137 | -#endif | ||
138 | } | ||
139 | -- | ||
140 | 2.41.0
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | raw_co_getlength is called by handle_aiocb_write_zeroes, which is not a coroutine | ||
4 | function. This is harmless because raw_co_getlength does not actually suspend, | ||
5 | but in the interest of clarity make it a non-coroutine_fn that is just wrapped | ||
6 | by the coroutine_fn raw_co_getlength. Likewise, check_cache_dropped was only | ||
7 | a coroutine_fn because it called raw_co_getlength, so it can be made non-coroutine | ||
8 | as well. | ||
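For illustration only (not part of this patch; the names are made up), the
resulting wrapper shape is roughly:

    /* Plain function: does not suspend, callable from any context */
    static int64_t foo_getlength(BlockDriverState *bs)
    {
        /* ... blocking implementation ... */
        return 0;
    }

    /* Thin coroutine_fn wrapper kept for coroutine-based callers */
    static int64_t coroutine_fn foo_co_getlength(BlockDriverState *bs)
    {
        return foo_getlength(bs);
    }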
9 | |||
10 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Message-ID: <20230601115145.196465-2-pbonzini@redhat.com> | ||
12 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
13 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
14 | --- | ||
15 | block/file-posix.c | 29 +++++++++++++++++------------ | ||
16 | 1 file changed, 17 insertions(+), 12 deletions(-) | ||
17 | |||
18 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/block/file-posix.c | ||
21 | +++ b/block/file-posix.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static int fd_open(BlockDriverState *bs) | ||
23 | return -EIO; | ||
24 | } | ||
25 | |||
26 | -static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs); | ||
27 | +static int64_t raw_getlength(BlockDriverState *bs); | ||
28 | |||
29 | typedef struct RawPosixAIOData { | ||
30 | BlockDriverState *bs; | ||
31 | @@ -XXX,XX +XXX,XX @@ static int handle_aiocb_write_zeroes(void *opaque) | ||
32 | #ifdef CONFIG_FALLOCATE | ||
33 | /* Last resort: we are trying to extend the file with zeroed data. This | ||
34 | * can be done via fallocate(fd, 0) */ | ||
35 | - len = raw_co_getlength(aiocb->bs); | ||
36 | + len = raw_getlength(aiocb->bs); | ||
37 | if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) { | ||
38 | int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes); | ||
39 | if (ret == 0 || ret != -ENOTSUP) { | ||
40 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset, | ||
41 | } | ||
42 | |||
43 | if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) { | ||
44 | - int64_t cur_length = raw_co_getlength(bs); | ||
45 | + int64_t cur_length = raw_getlength(bs); | ||
46 | |||
47 | if (offset != cur_length && exact) { | ||
48 | error_setg(errp, "Cannot resize device files"); | ||
49 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset, | ||
50 | } | ||
51 | |||
52 | #ifdef __OpenBSD__ | ||
53 | -static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
54 | +static int64_t raw_getlength(BlockDriverState *bs) | ||
55 | { | ||
56 | BDRVRawState *s = bs->opaque; | ||
57 | int fd = s->fd; | ||
58 | @@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
59 | return st.st_size; | ||
60 | } | ||
61 | #elif defined(__NetBSD__) | ||
62 | -static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
63 | +static int64_t raw_getlength(BlockDriverState *bs) | ||
64 | { | ||
65 | BDRVRawState *s = bs->opaque; | ||
66 | int fd = s->fd; | ||
67 | @@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
68 | return st.st_size; | ||
69 | } | ||
70 | #elif defined(__sun__) | ||
71 | -static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
72 | +static int64_t raw_getlength(BlockDriverState *bs) | ||
73 | { | ||
74 | BDRVRawState *s = bs->opaque; | ||
75 | struct dk_minfo minfo; | ||
76 | @@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
77 | return size; | ||
78 | } | ||
79 | #elif defined(CONFIG_BSD) | ||
80 | -static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
81 | +static int64_t raw_getlength(BlockDriverState *bs) | ||
82 | { | ||
83 | BDRVRawState *s = bs->opaque; | ||
84 | int fd = s->fd; | ||
85 | @@ -XXX,XX +XXX,XX @@ again: | ||
86 | return size; | ||
87 | } | ||
88 | #else | ||
89 | -static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
90 | +static int64_t raw_getlength(BlockDriverState *bs) | ||
91 | { | ||
92 | BDRVRawState *s = bs->opaque; | ||
93 | int ret; | ||
94 | @@ -XXX,XX +XXX,XX @@ static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
95 | } | ||
96 | #endif | ||
97 | |||
98 | +static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs) | ||
99 | +{ | ||
100 | + return raw_getlength(bs); | ||
101 | +} | ||
102 | + | ||
103 | static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs) | ||
104 | { | ||
105 | struct stat st; | ||
106 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, | ||
107 | * round up if necessary. | ||
108 | */ | ||
109 | if (!QEMU_IS_ALIGNED(*pnum, bs->bl.request_alignment)) { | ||
110 | - int64_t file_length = raw_co_getlength(bs); | ||
111 | + int64_t file_length = raw_getlength(bs); | ||
112 | if (file_length > 0) { | ||
113 | /* Ignore errors, this is just a safeguard */ | ||
114 | assert(hole == file_length); | ||
115 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, | ||
116 | |||
117 | #if defined(__linux__) | ||
118 | /* Verify that the file is not in the page cache */ | ||
119 | -static void coroutine_fn check_cache_dropped(BlockDriverState *bs, Error **errp) | ||
120 | +static void check_cache_dropped(BlockDriverState *bs, Error **errp) | ||
121 | { | ||
122 | const size_t window_size = 128 * 1024 * 1024; | ||
123 | BDRVRawState *s = bs->opaque; | ||
124 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn check_cache_dropped(BlockDriverState *bs, Error **errp) | ||
125 | page_size = sysconf(_SC_PAGESIZE); | ||
126 | vec = g_malloc(DIV_ROUND_UP(window_size, page_size)); | ||
127 | |||
128 | - end = raw_co_getlength(bs); | ||
129 | + end = raw_getlength(bs); | ||
130 | |||
131 | for (offset = 0; offset < end; offset += window_size) { | ||
132 | void *new_window; | ||
133 | @@ -XXX,XX +XXX,XX @@ static int cdrom_reopen(BlockDriverState *bs) | ||
134 | |||
135 | static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs) | ||
136 | { | ||
137 | - return raw_co_getlength(bs) > 0; | ||
138 | + return raw_getlength(bs) > 0; | ||
139 | } | ||
140 | |||
141 | static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag) | ||
142 | -- | ||
143 | 2.41.0 |
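The hunks above all perform the same split, which a minimal sketch may make easier to follow. The function bodies below are illustrative only (they assume file-posix's usual includes such as <sys/stat.h> and <errno.h>, and collapse the per-OS variants into a single fstat() call); the point is the shape: a helper that never suspends is left as a plain function, usable from coroutine and non-coroutine context alike, and only the coroutine-typed .bdrv_co_getlength callback keeps a trivial coroutine_fn wrapper.

    /* Sketch: no coroutine_fn, because this helper never yields */
    static int64_t example_getlength(BlockDriverState *bs)
    {
        BDRVRawState *s = bs->opaque;
        struct stat st;

        if (fstat(s->fd, &st) < 0) {
            return -errno;
        }
        return st.st_size;
    }

    /* Sketch: thin wrapper so the coroutine-typed driver callback still exists */
    static int64_t coroutine_fn example_co_getlength(BlockDriverState *bs)
    {
        return example_getlength(bs);
    }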
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns | ||
4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped | ||
5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. | ||
6 | |||
7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
8 | Message-ID: <20230601115145.196465-3-pbonzini@redhat.com> | ||
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | ||
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | --- | ||
12 | block/qed-check.c | 5 +++-- | ||
13 | block/qed.c | 7 ++++--- | ||
14 | 2 files changed, 7 insertions(+), 5 deletions(-) | ||
15 | |||
16 | diff --git a/block/qed-check.c b/block/qed-check.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/qed-check.c | ||
19 | +++ b/block/qed-check.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static void qed_check_for_leaks(QEDCheck *check) | ||
21 | /** | ||
22 | * Mark an image clean once it passes check or has been repaired | ||
23 | */ | ||
24 | -static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result) | ||
25 | +static void coroutine_fn GRAPH_RDLOCK | ||
26 | +qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result) | ||
27 | { | ||
28 | /* Skip if there were unfixable corruptions or I/O errors */ | ||
29 | if (result->corruptions > 0 || result->check_errors > 0) { | ||
30 | @@ -XXX,XX +XXX,XX @@ static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result) | ||
31 | } | ||
32 | |||
33 | /* Ensure fixes reach storage before clearing check bit */ | ||
34 | - bdrv_flush(s->bs); | ||
35 | + bdrv_co_flush(s->bs); | ||
36 | |||
37 | s->header.features &= ~QED_F_NEED_CHECK; | ||
38 | qed_write_header_sync(s); | ||
39 | diff --git a/block/qed.c b/block/qed.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/block/qed.c | ||
42 | +++ b/block/qed.c | ||
43 | @@ -XXX,XX +XXX,XX @@ static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size, | ||
44 | * | ||
45 | * The string is NUL-terminated. | ||
46 | */ | ||
47 | -static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n, | ||
48 | - char *buf, size_t buflen) | ||
49 | +static int coroutine_fn GRAPH_RDLOCK | ||
50 | +qed_read_string(BdrvChild *file, uint64_t offset, | ||
51 | + size_t n, char *buf, size_t buflen) | ||
52 | { | ||
53 | int ret; | ||
54 | if (n >= buflen) { | ||
55 | return -EINVAL; | ||
56 | } | ||
57 | - ret = bdrv_pread(file, offset, n, buf, 0); | ||
58 | + ret = bdrv_co_pread(file, offset, n, buf, 0); | ||
59 | if (ret < 0) { | ||
60 | return ret; | ||
61 | } | ||
62 | -- | ||
63 | 2.41.0 |
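Since the driver patches that follow repeat this conversion, a condensed sketch of the pattern may be useful. The function below is invented for illustration and belongs to no driver; only the annotations and the bdrv_co_pread() call are taken from the series: a function that is reached exclusively from other coroutine_fns is marked coroutine_fn and GRAPH_RDLOCK, and it calls the native coroutine variant instead of the bdrv_pread() co_wrapper, so that the graph-lock requirement is expressed along the whole coroutine call chain.

    /* Illustrative only: runs in coroutine context, caller holds the
     * block-graph reader lock (GRAPH_RDLOCK). */
    static int coroutine_fn GRAPH_RDLOCK
    example_read_header(BlockDriverState *bs, void *buf, size_t buflen)
    {
        /* native coroutine call rather than the bdrv_pread() co_wrapper */
        return bdrv_co_pread(bs->file, 0, buflen, buf, 0);
    }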
1 | From: Alberto Faria <afaria@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Setting it to true can cause the device size to be queried from libblkio | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | in otherwise fast paths, degrading performance. Set it to false and | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | require users to refresh the device size explicitly instead. | 5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. |
6 | 6 | ||
7 | Fixes: 4c8f4fda0504 ("block/blkio: Tolerate device size changes") | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | Suggested-by: Kevin Wolf <kwolf@redhat.com> | 8 | Message-ID: <20230601115145.196465-4-pbonzini@redhat.com> |
9 | Signed-off-by: Alberto Faria <afaria@redhat.com> | ||
10 | Message-Id: <20221108144433.1334074-1-afaria@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
13 | --- | 11 | --- |
14 | block/blkio.c | 1 - | 12 | block/vpc.c | 52 ++++++++++++++++++++++++++-------------------------- |
15 | 1 file changed, 1 deletion(-) | 13 | 1 file changed, 26 insertions(+), 26 deletions(-) |
16 | 14 | ||
17 | diff --git a/block/blkio.c b/block/blkio.c | 15 | diff --git a/block/vpc.c b/block/vpc.c |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/block/blkio.c | 17 | --- a/block/vpc.c |
20 | +++ b/block/blkio.c | 18 | +++ b/block/vpc.c |
21 | @@ -XXX,XX +XXX,XX @@ static void blkio_refresh_limits(BlockDriverState *bs, Error **errp) | 19 | @@ -XXX,XX +XXX,XX @@ static int vpc_reopen_prepare(BDRVReopenState *state, |
22 | { \ | 20 | * operation (the block bitmaps is updated then), 0 otherwise. |
23 | .format_name = name, \ | 21 | * If write is true then err must not be NULL. |
24 | .protocol_name = name, \ | 22 | */ |
25 | - .has_variable_length = true, \ | 23 | -static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset, |
26 | .instance_size = sizeof(BDRVBlkioState), \ | 24 | - bool write, int *err) |
27 | .bdrv_file_open = blkio_file_open, \ | 25 | +static int64_t coroutine_fn GRAPH_RDLOCK |
28 | .bdrv_close = blkio_close, \ | 26 | +get_image_offset(BlockDriverState *bs, uint64_t offset, bool write, int *err) |
27 | { | ||
28 | BDRVVPCState *s = bs->opaque; | ||
29 | uint64_t bitmap_offset, block_offset; | ||
30 | @@ -XXX,XX +XXX,XX @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset, | ||
31 | |||
32 | s->last_bitmap_offset = bitmap_offset; | ||
33 | memset(bitmap, 0xff, s->bitmap_size); | ||
34 | - r = bdrv_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap, | ||
35 | - 0); | ||
36 | + r = bdrv_co_pwrite_sync(bs->file, bitmap_offset, s->bitmap_size, bitmap, 0); | ||
37 | if (r < 0) { | ||
38 | *err = r; | ||
39 | return -2; | ||
40 | @@ -XXX,XX +XXX,XX @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset, | ||
41 | * | ||
42 | * Returns 0 on success and < 0 on error | ||
43 | */ | ||
44 | -static int rewrite_footer(BlockDriverState *bs) | ||
45 | +static int coroutine_fn GRAPH_RDLOCK rewrite_footer(BlockDriverState *bs) | ||
46 | { | ||
47 | int ret; | ||
48 | BDRVVPCState *s = bs->opaque; | ||
49 | int64_t offset = s->free_data_block_offset; | ||
50 | |||
51 | - ret = bdrv_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0); | ||
52 | + ret = bdrv_co_pwrite_sync(bs->file, offset, sizeof(s->footer), &s->footer, 0); | ||
53 | if (ret < 0) | ||
54 | return ret; | ||
55 | |||
56 | @@ -XXX,XX +XXX,XX @@ static int rewrite_footer(BlockDriverState *bs) | ||
57 | * | ||
58 | * Returns the sectors' offset in the image file on success and < 0 on error | ||
59 | */ | ||
60 | -static int64_t alloc_block(BlockDriverState *bs, int64_t offset) | ||
61 | +static int64_t coroutine_fn GRAPH_RDLOCK | ||
62 | +alloc_block(BlockDriverState *bs, int64_t offset) | ||
63 | { | ||
64 | BDRVVPCState *s = bs->opaque; | ||
65 | int64_t bat_offset; | ||
66 | @@ -XXX,XX +XXX,XX @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset) | ||
67 | |||
68 | /* Initialize the block's bitmap */ | ||
69 | memset(bitmap, 0xff, s->bitmap_size); | ||
70 | - ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset, | ||
71 | - s->bitmap_size, bitmap, 0); | ||
72 | + ret = bdrv_co_pwrite_sync(bs->file, s->free_data_block_offset, | ||
73 | + s->bitmap_size, bitmap, 0); | ||
74 | if (ret < 0) { | ||
75 | return ret; | ||
76 | } | ||
77 | @@ -XXX,XX +XXX,XX @@ static int64_t alloc_block(BlockDriverState *bs, int64_t offset) | ||
78 | /* Write BAT entry to disk */ | ||
79 | bat_offset = s->bat_offset + (4 * index); | ||
80 | bat_value = cpu_to_be32(s->pagetable[index]); | ||
81 | - ret = bdrv_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0); | ||
82 | + ret = bdrv_co_pwrite_sync(bs->file, bat_offset, 4, &bat_value, 0); | ||
83 | if (ret < 0) | ||
84 | goto fail; | ||
85 | |||
86 | @@ -XXX,XX +XXX,XX @@ fail: | ||
87 | return ret; | ||
88 | } | ||
89 | |||
90 | -static int coroutine_fn vpc_co_block_status(BlockDriverState *bs, | ||
91 | - bool want_zero, | ||
92 | - int64_t offset, int64_t bytes, | ||
93 | - int64_t *pnum, int64_t *map, | ||
94 | - BlockDriverState **file) | ||
95 | +static int coroutine_fn GRAPH_RDLOCK | ||
96 | +vpc_co_block_status(BlockDriverState *bs, bool want_zero, | ||
97 | + int64_t offset, int64_t bytes, | ||
98 | + int64_t *pnum, int64_t *map, | ||
99 | + BlockDriverState **file) | ||
100 | { | ||
101 | BDRVVPCState *s = bs->opaque; | ||
102 | int64_t image_offset; | ||
103 | @@ -XXX,XX +XXX,XX @@ static int calculate_geometry(int64_t total_sectors, uint16_t *cyls, | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | -static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, | ||
108 | - int64_t total_sectors) | ||
109 | +static int coroutine_fn create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, | ||
110 | + int64_t total_sectors) | ||
111 | { | ||
112 | VHDDynDiskHeader dyndisk_header; | ||
113 | uint8_t bat_sector[512]; | ||
114 | @@ -XXX,XX +XXX,XX @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, | ||
115 | block_size = 0x200000; | ||
116 | num_bat_entries = DIV_ROUND_UP(total_sectors, block_size / 512); | ||
117 | |||
118 | - ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0); | ||
119 | + ret = blk_co_pwrite(blk, offset, sizeof(*footer), footer, 0); | ||
120 | if (ret < 0) { | ||
121 | goto fail; | ||
122 | } | ||
123 | |||
124 | offset = 1536 + ((num_bat_entries * 4 + 511) & ~511); | ||
125 | - ret = blk_pwrite(blk, offset, sizeof(*footer), footer, 0); | ||
126 | + ret = blk_co_pwrite(blk, offset, sizeof(*footer), footer, 0); | ||
127 | if (ret < 0) { | ||
128 | goto fail; | ||
129 | } | ||
130 | @@ -XXX,XX +XXX,XX @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, | ||
131 | |||
132 | memset(bat_sector, 0xFF, 512); | ||
133 | for (i = 0; i < DIV_ROUND_UP(num_bat_entries * 4, 512); i++) { | ||
134 | - ret = blk_pwrite(blk, offset, 512, bat_sector, 0); | ||
135 | + ret = blk_co_pwrite(blk, offset, 512, bat_sector, 0); | ||
136 | if (ret < 0) { | ||
137 | goto fail; | ||
138 | } | ||
139 | @@ -XXX,XX +XXX,XX @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, | ||
140 | /* Write the header */ | ||
141 | offset = 512; | ||
142 | |||
143 | - ret = blk_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0); | ||
144 | + ret = blk_co_pwrite(blk, offset, sizeof(dyndisk_header), &dyndisk_header, 0); | ||
145 | if (ret < 0) { | ||
146 | goto fail; | ||
147 | } | ||
148 | @@ -XXX,XX +XXX,XX @@ static int create_dynamic_disk(BlockBackend *blk, VHDFooter *footer, | ||
149 | return ret; | ||
150 | } | ||
151 | |||
152 | -static int create_fixed_disk(BlockBackend *blk, VHDFooter *footer, | ||
153 | - int64_t total_size, Error **errp) | ||
154 | +static int coroutine_fn create_fixed_disk(BlockBackend *blk, VHDFooter *footer, | ||
155 | + int64_t total_size, Error **errp) | ||
156 | { | ||
157 | int ret; | ||
158 | |||
159 | /* Add footer to total size */ | ||
160 | total_size += sizeof(*footer); | ||
161 | |||
162 | - ret = blk_truncate(blk, total_size, false, PREALLOC_MODE_OFF, 0, errp); | ||
163 | + ret = blk_co_truncate(blk, total_size, false, PREALLOC_MODE_OFF, 0, errp); | ||
164 | if (ret < 0) { | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | - ret = blk_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer), | ||
169 | - footer, 0); | ||
170 | + ret = blk_co_pwrite(blk, total_size - sizeof(*footer), sizeof(*footer), | ||
171 | + footer, 0); | ||
172 | if (ret < 0) { | ||
173 | error_setg_errno(errp, -ret, "Unable to write VHD header"); | ||
174 | return ret; | ||
29 | -- | 175 | -- |
30 | 2.38.1 | 176 | 2.41.0 |
1 | From: Alberto Faria <afaria@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | The nvme-io_uring BlockDriver's path option must point at the character | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | device of an NVMe namespace, not at an image file. | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. | ||
5 | 6 | ||
6 | Fixes: fd66dbd424f5 ("blkio: add libblkio block driver") | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
7 | Suggested-by: Stefano Garzarella <sgarzare@redhat.com> | 8 | Message-ID: <20230601115145.196465-5-pbonzini@redhat.com> |
8 | Signed-off-by: Alberto Faria <afaria@redhat.com> | ||
9 | Message-Id: <20221108142347.1322674-1-afaria@redhat.com> | ||
10 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | ||
11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
13 | --- | 11 | --- |
14 | qapi/block-core.json | 2 +- | 12 | block/bochs.c | 7 ++++--- |
15 | 1 file changed, 1 insertion(+), 1 deletion(-) | 13 | 1 file changed, 4 insertions(+), 3 deletions(-) |
16 | 14 | ||
17 | diff --git a/qapi/block-core.json b/qapi/block-core.json | 15 | diff --git a/block/bochs.c b/block/bochs.c |
18 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/qapi/block-core.json | 17 | --- a/block/bochs.c |
20 | +++ b/qapi/block-core.json | 18 | +++ b/block/bochs.c |
21 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ static void bochs_refresh_limits(BlockDriverState *bs, Error **errp) |
22 | # | 20 | bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */ |
23 | # Driver specific block device options for the nvme-io_uring backend. | 21 | } |
24 | # | 22 | |
25 | -# @path: path to the image file | 23 | -static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num) |
26 | +# @path: path to the NVMe namespace's character device (e.g. /dev/ng0n1). | 24 | +static int64_t coroutine_fn GRAPH_RDLOCK |
27 | # | 25 | +seek_to_sector(BlockDriverState *bs, int64_t sector_num) |
28 | # Since: 7.2 | 26 | { |
29 | ## | 27 | BDRVBochsState *s = bs->opaque; |
28 | uint64_t offset = sector_num * 512; | ||
29 | @@ -XXX,XX +XXX,XX @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num) | ||
30 | (s->extent_blocks + s->bitmap_blocks)); | ||
31 | |||
32 | /* read in bitmap for current extent */ | ||
33 | - ret = bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8), 1, | ||
34 | - &bitmap_entry, 0); | ||
35 | + ret = bdrv_co_pread(bs->file, bitmap_offset + (extent_offset / 8), 1, | ||
36 | + &bitmap_entry, 0); | ||
37 | if (ret < 0) { | ||
38 | return ret; | ||
39 | } | ||
30 | -- | 40 | -- |
31 | 2.38.1 | 41 | 2.41.0 |
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | We want to use bdrv_child_get_parent_aio_context() from | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | bdrv_parent_drained_{begin,end}_single(), both of which are "I/O or GS" | 4 | and they can suspend. Because this function operates on a BlockBackend, mark it |
5 | functions. | 5 | GRAPH_UNLOCKED. |
6 | 6 | ||
7 | Prior to 3ed4f708fe1, all the implementations were I/O code anyway. | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | 3ed4f708fe1 has put block jobs' AioContext field under the job mutex, so | 8 | Message-ID: <20230601115145.196465-6-pbonzini@redhat.com> |
9 | to make child_job_get_parent_aio_context() work in an I/O context, we | ||
10 | need to take that lock there. | ||
11 | |||
12 | Furthermore, blk_root_get_parent_aio_context() is not marked as | ||
13 | anything, but is safe to run in an I/O context, so mark it that way now. | ||
14 | (blk_get_aio_context() is an I/O code function.) | ||
15 | |||
16 | With that done, all implementations explicitly are I/O code, so we can | ||
17 | mark bdrv_child_get_parent_aio_context() as I/O code, too, so callers | ||
18 | know it is safe to run from both GS and I/O contexts. | ||
19 | |||
20 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | ||
21 | Message-Id: <20221107151321.211175-2-hreitz@redhat.com> | ||
22 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
23 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
24 | --- | 11 | --- |
25 | include/block/block-global-state.h | 1 - | 12 | block.c | 11 ++++++----- |
26 | include/block/block-io.h | 2 ++ | 13 | 1 file changed, 6 insertions(+), 5 deletions(-) |
27 | include/block/block_int-common.h | 4 ++-- | ||
28 | block.c | 2 +- | ||
29 | block/block-backend.c | 1 + | ||
30 | blockjob.c | 3 ++- | ||
31 | 6 files changed, 8 insertions(+), 5 deletions(-) | ||
32 | 14 | ||
33 | diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/include/block/block-global-state.h | ||
36 | +++ b/include/block/block-global-state.h | ||
37 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn bdrv_co_lock(BlockDriverState *bs); | ||
38 | */ | ||
39 | void coroutine_fn bdrv_co_unlock(BlockDriverState *bs); | ||
40 | |||
41 | -AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c); | ||
42 | bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, | ||
43 | GHashTable *visited, Transaction *tran, | ||
44 | Error **errp); | ||
45 | diff --git a/include/block/block-io.h b/include/block/block-io.h | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/include/block/block-io.h | ||
48 | +++ b/include/block/block-io.h | ||
49 | @@ -XXX,XX +XXX,XX @@ void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event); | ||
50 | */ | ||
51 | AioContext *bdrv_get_aio_context(BlockDriverState *bs); | ||
52 | |||
53 | +AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c); | ||
54 | + | ||
55 | /** | ||
56 | * Move the current coroutine to the AioContext of @bs and return the old | ||
57 | * AioContext of the coroutine. Increase bs->in_flight so that draining @bs | ||
58 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/include/block/block_int-common.h | ||
61 | +++ b/include/block/block_int-common.h | ||
62 | @@ -XXX,XX +XXX,XX @@ struct BdrvChildClass { | ||
63 | GHashTable *visited, Transaction *tran, | ||
64 | Error **errp); | ||
65 | |||
66 | - AioContext *(*get_parent_aio_context)(BdrvChild *child); | ||
67 | - | ||
68 | /* | ||
69 | * I/O API functions. These functions are thread-safe. | ||
70 | * | ||
71 | @@ -XXX,XX +XXX,XX @@ struct BdrvChildClass { | ||
72 | */ | ||
73 | const char *(*get_name)(BdrvChild *child); | ||
74 | |||
75 | + AioContext *(*get_parent_aio_context)(BdrvChild *child); | ||
76 | + | ||
77 | /* | ||
78 | * If this pair of functions is implemented, the parent doesn't issue new | ||
79 | * requests after returning from .drained_begin() until .drained_end() is | ||
80 | diff --git a/block.c b/block.c | 15 | diff --git a/block.c b/block.c |
81 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
82 | --- a/block.c | 17 | --- a/block.c |
83 | +++ b/block.c | 18 | +++ b/block.c |
84 | @@ -XXX,XX +XXX,XX @@ const BdrvChildClass child_of_bds = { | 19 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_create(BlockDriver *drv, const char *filename, |
85 | 20 | * On success, return @blk's actual length. | |
86 | AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c) | 21 | * Otherwise, return -errno. |
22 | */ | ||
23 | -static int64_t create_file_fallback_truncate(BlockBackend *blk, | ||
24 | - int64_t minimum_size, Error **errp) | ||
25 | +static int64_t coroutine_fn GRAPH_UNLOCKED | ||
26 | +create_file_fallback_truncate(BlockBackend *blk, int64_t minimum_size, | ||
27 | + Error **errp) | ||
87 | { | 28 | { |
88 | - GLOBAL_STATE_CODE(); | 29 | Error *local_err = NULL; |
89 | + IO_CODE(); | 30 | int64_t size; |
90 | return c->klass->get_parent_aio_context(c); | 31 | @@ -XXX,XX +XXX,XX @@ static int64_t create_file_fallback_truncate(BlockBackend *blk, |
91 | } | 32 | |
92 | 33 | GLOBAL_STATE_CODE(); | |
93 | diff --git a/block/block-backend.c b/block/block-backend.c | 34 | |
94 | index XXXXXXX..XXXXXXX 100644 | 35 | - ret = blk_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0, |
95 | --- a/block/block-backend.c | 36 | - &local_err); |
96 | +++ b/block/block-backend.c | 37 | + ret = blk_co_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, 0, |
97 | @@ -XXX,XX +XXX,XX @@ static void blk_root_detach(BdrvChild *child) | 38 | + &local_err); |
98 | static AioContext *blk_root_get_parent_aio_context(BdrvChild *c) | 39 | if (ret < 0 && ret != -ENOTSUP) { |
99 | { | 40 | error_propagate(errp, local_err); |
100 | BlockBackend *blk = c->opaque; | 41 | return ret; |
101 | + IO_CODE(); | 42 | } |
102 | 43 | ||
103 | return blk_get_aio_context(blk); | 44 | - size = blk_getlength(blk); |
104 | } | 45 | + size = blk_co_getlength(blk); |
105 | diff --git a/blockjob.c b/blockjob.c | 46 | if (size < 0) { |
106 | index XXXXXXX..XXXXXXX 100644 | 47 | error_free(local_err); |
107 | --- a/blockjob.c | 48 | error_setg_errno(errp, -size, |
108 | +++ b/blockjob.c | ||
109 | @@ -XXX,XX +XXX,XX @@ static bool child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, | ||
110 | static AioContext *child_job_get_parent_aio_context(BdrvChild *c) | ||
111 | { | ||
112 | BlockJob *job = c->opaque; | ||
113 | - GLOBAL_STATE_CODE(); | ||
114 | + IO_CODE(); | ||
115 | + JOB_LOCK_GUARD(); | ||
116 | |||
117 | return job->job.aio_context; | ||
118 | } | ||
119 | -- | 49 | -- |
120 | 2.38.1 | 50 | 2.41.0 |
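The BlockBackend case above differs from the GRAPH_RDLOCK conversions, and a made-up example may help keep the two apart. Only the annotations and the blk_co_*() calls below come from the patch; the helper itself is hypothetical: a coroutine that works through a BlockBackend is marked GRAPH_UNLOCKED, meaning it must be entered without the graph lock held, presumably because the blk_co_*() functions take care of the locking themselves.

    /* Hypothetical helper: coroutine context, graph lock NOT held by the
     * caller (GRAPH_UNLOCKED), BlockBackend API used via blk_co_*(). */
    static int64_t coroutine_fn GRAPH_UNLOCKED
    example_backend_size(BlockBackend *blk, Error **errp)
    {
        int64_t len = blk_co_getlength(blk);
        if (len < 0) {
            error_setg_errno(errp, -len, "Could not get image size");
        }
        return len;
    }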
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Have write requests happen to the source node right when we start a | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | mirror job. The mirror filter node may encounter MirrorBDSOpaque.job | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | being NULL, but this should not cause a segfault. | 5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. |
6 | 6 | ||
7 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | Message-Id: <20221109165452.67927-6-hreitz@redhat.com> | 8 | Message-ID: <20230601115145.196465-7-pbonzini@redhat.com> |
9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
11 | --- | 11 | --- |
12 | tests/qemu-iotests/151 | 53 +++++++++++++++++++++++++++++++++++--- | 12 | block/cloop.c | 9 +++++---- |
13 | tests/qemu-iotests/151.out | 4 +-- | 13 | 1 file changed, 5 insertions(+), 4 deletions(-) |
14 | 2 files changed, 52 insertions(+), 5 deletions(-) | ||
15 | 14 | ||
16 | diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151 | 15 | diff --git a/block/cloop.c b/block/cloop.c |
17 | index XXXXXXX..XXXXXXX 100755 | ||
18 | --- a/tests/qemu-iotests/151 | ||
19 | +++ b/tests/qemu-iotests/151 | ||
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | import math | ||
22 | import os | ||
23 | import subprocess | ||
24 | -from typing import List | ||
25 | +import time | ||
26 | +from typing import List, Optional | ||
27 | import iotests | ||
28 | from iotests import qemu_img | ||
29 | |||
30 | @@ -XXX,XX +XXX,XX @@ class TestActiveMirror(iotests.QMPTestCase): | ||
31 | self.potential_writes_in_flight = False | ||
32 | |||
33 | |||
34 | -class TestThrottledWithNbdExport(iotests.QMPTestCase): | ||
35 | +class TestThrottledWithNbdExportBase(iotests.QMPTestCase): | ||
36 | image_len = 128 * 1024 * 1024 # MB | ||
37 | - iops = 16 | ||
38 | + iops: Optional[int] = None | ||
39 | background_processes: List['subprocess.Popen[str]'] = [] | ||
40 | |||
41 | def setUp(self): | ||
42 | + # Must be set by subclasses | ||
43 | + self.assertIsNotNone(self.iops) | ||
44 | + | ||
45 | qemu_img('create', '-f', iotests.imgfmt, source_img, '128M') | ||
46 | qemu_img('create', '-f', iotests.imgfmt, target_img, '128M') | ||
47 | |||
48 | @@ -XXX,XX +XXX,XX @@ class TestThrottledWithNbdExport(iotests.QMPTestCase): | ||
49 | os.remove(source_img) | ||
50 | os.remove(target_img) | ||
51 | |||
52 | + | ||
53 | +class TestLowThrottledWithNbdExport(TestThrottledWithNbdExportBase): | ||
54 | + iops = 16 | ||
55 | + | ||
56 | def testUnderLoad(self): | ||
57 | ''' | ||
58 | Throttle the source node, then issue a whole bunch of external requests | ||
59 | @@ -XXX,XX +XXX,XX @@ class TestThrottledWithNbdExport(iotests.QMPTestCase): | ||
60 | self.assertGreater(start_remaining - end_remaining, 0) | ||
61 | |||
62 | |||
63 | +class TestHighThrottledWithNbdExport(TestThrottledWithNbdExportBase): | ||
64 | + iops = 1024 | ||
65 | + | ||
66 | + def testActiveOnCreation(self): | ||
67 | + ''' | ||
68 | + Issue requests on the mirror source node right as the mirror is | ||
69 | + instated. It's possible that requests occur before the actual job is | ||
70 | + created, but after the node has been put into the graph. Write | ||
71 | + requests across the node must in that case be forwarded to the source | ||
72 | + node without attempting to mirror them (there is no job object yet, so | ||
73 | + attempting to access it would cause a segfault). | ||
74 | + We do this with a lightly throttled node (i.e. quite high IOPS limit). | ||
75 | + # Using throttling seems to increase reproducibility, but if the limit is | ||
76 | + too low, all requests allowed per second will be submitted before | ||
77 | + mirror_start_job() gets to the problematic point. | ||
78 | + ''' | ||
79 | + | ||
80 | + # Let qemu-img bench create write requests (enough for two seconds on | ||
81 | + # the virtual clock) | ||
82 | + bench_args = ['bench', '-w', '-d', '1024', '-f', 'nbd', | ||
83 | + '-c', str(self.iops * 2), self.nbd_url] | ||
84 | + p = iotests.qemu_tool_popen(iotests.qemu_img_args + bench_args) | ||
85 | + self.background_processes += [p] | ||
86 | + | ||
87 | + # Give qemu-img bench time to start up and issue requests | ||
88 | + time.sleep(1.0) | ||
89 | + # Flush the request queue, so new requests can come in right as we | ||
90 | + # start blockdev-mirror | ||
91 | + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') | ||
92 | + | ||
93 | + result = self.vm.qmp('blockdev-mirror', | ||
94 | + job_id='mirror', | ||
95 | + device='source-node', | ||
96 | + target='target-node', | ||
97 | + sync='full', | ||
98 | + copy_mode='write-blocking') | ||
99 | + self.assert_qmp(result, 'return', {}) | ||
100 | + | ||
101 | + | ||
102 | if __name__ == '__main__': | ||
103 | iotests.main(supported_fmts=['qcow2', 'raw'], | ||
104 | supported_protocols=['file']) | ||
105 | diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out | ||
106 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
107 | --- a/tests/qemu-iotests/151.out | 17 | --- a/block/cloop.c |
108 | +++ b/tests/qemu-iotests/151.out | 18 | +++ b/block/cloop.c |
109 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ static void cloop_refresh_limits(BlockDriverState *bs, Error **errp) |
110 | -..... | 20 | bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */ |
111 | +...... | 21 | } |
112 | ---------------------------------------------------------------------- | 22 | |
113 | -Ran 5 tests | 23 | -static inline int cloop_read_block(BlockDriverState *bs, int block_num) |
114 | +Ran 6 tests | 24 | +static int coroutine_fn GRAPH_RDLOCK |
115 | 25 | +cloop_read_block(BlockDriverState *bs, int block_num) | |
116 | OK | 26 | { |
27 | BDRVCloopState *s = bs->opaque; | ||
28 | |||
29 | @@ -XXX,XX +XXX,XX @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num) | ||
30 | int ret; | ||
31 | uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num]; | ||
32 | |||
33 | - ret = bdrv_pread(bs->file, s->offsets[block_num], bytes, | ||
34 | - s->compressed_block, 0); | ||
35 | + ret = bdrv_co_pread(bs->file, s->offsets[block_num], bytes, | ||
36 | + s->compressed_block, 0); | ||
37 | if (ret < 0) { | ||
38 | return -1; | ||
39 | } | ||
40 | @@ -XXX,XX +XXX,XX @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num) | ||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | -static int coroutine_fn | ||
45 | +static int coroutine_fn GRAPH_RDLOCK | ||
46 | cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
47 | QEMUIOVector *qiov, BdrvRequestFlags flags) | ||
48 | { | ||
117 | -- | 49 | -- |
118 | 2.38.1 | 50 | 2.41.0 |
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Before this series, a mirror job in write-blocking mode would pause | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | issuing background requests while active requests are in flight. Thus, | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | if the source is constantly in use by active requests, no actual | 5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. |
6 | progress can be made. | ||
7 | 6 | ||
8 | This series should have fixed that, making the mirror job issue | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | background requests even while active requests are in flight. | 8 | Message-ID: <20230601115145.196465-8-pbonzini@redhat.com> |
10 | |||
11 | Have a new test case in 151 verify this. | ||
12 | |||
13 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | ||
14 | Message-Id: <20221109165452.67927-5-hreitz@redhat.com> | ||
15 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
16 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
17 | --- | 11 | --- |
18 | tests/qemu-iotests/151 | 180 ++++++++++++++++++++++++++++++++++++- | 12 | block/dmg.c | 21 +++++++++++---------- |
19 | tests/qemu-iotests/151.out | 4 +- | 13 | 1 file changed, 11 insertions(+), 10 deletions(-) |
20 | 2 files changed, 181 insertions(+), 3 deletions(-) | ||
21 | 14 | ||
22 | diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151 | 15 | diff --git a/block/dmg.c b/block/dmg.c |
23 | index XXXXXXX..XXXXXXX 100755 | ||
24 | --- a/tests/qemu-iotests/151 | ||
25 | +++ b/tests/qemu-iotests/151 | ||
26 | @@ -XXX,XX +XXX,XX @@ | ||
27 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
28 | # | ||
29 | |||
30 | +import math | ||
31 | import os | ||
32 | +import subprocess | ||
33 | +from typing import List | ||
34 | import iotests | ||
35 | from iotests import qemu_img | ||
36 | |||
37 | @@ -XXX,XX +XXX,XX @@ class TestActiveMirror(iotests.QMPTestCase): | ||
38 | self.vm = iotests.VM() | ||
39 | self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source)) | ||
40 | self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target)) | ||
41 | - self.vm.add_device('virtio-blk,drive=source') | ||
42 | + self.vm.add_device('virtio-blk,id=vblk,drive=source') | ||
43 | self.vm.launch() | ||
44 | |||
45 | def tearDown(self): | ||
46 | @@ -XXX,XX +XXX,XX @@ class TestActiveMirror(iotests.QMPTestCase): | ||
47 | self.potential_writes_in_flight = False | ||
48 | |||
49 | |||
50 | +class TestThrottledWithNbdExport(iotests.QMPTestCase): | ||
51 | + image_len = 128 * 1024 * 1024 # MB | ||
52 | + iops = 16 | ||
53 | + background_processes: List['subprocess.Popen[str]'] = [] | ||
54 | + | ||
55 | + def setUp(self): | ||
56 | + qemu_img('create', '-f', iotests.imgfmt, source_img, '128M') | ||
57 | + qemu_img('create', '-f', iotests.imgfmt, target_img, '128M') | ||
58 | + | ||
59 | + self.vm = iotests.VM() | ||
60 | + self.vm.launch() | ||
61 | + | ||
62 | + result = self.vm.qmp('object-add', **{ | ||
63 | + 'qom-type': 'throttle-group', | ||
64 | + 'id': 'thrgr', | ||
65 | + 'limits': { | ||
66 | + 'iops-total': self.iops, | ||
67 | + 'iops-total-max': self.iops | ||
68 | + } | ||
69 | + }) | ||
70 | + self.assert_qmp(result, 'return', {}) | ||
71 | + | ||
72 | + result = self.vm.qmp('blockdev-add', **{ | ||
73 | + 'node-name': 'source-node', | ||
74 | + 'driver': 'throttle', | ||
75 | + 'throttle-group': 'thrgr', | ||
76 | + 'file': { | ||
77 | + 'driver': iotests.imgfmt, | ||
78 | + 'file': { | ||
79 | + 'driver': 'file', | ||
80 | + 'filename': source_img | ||
81 | + } | ||
82 | + } | ||
83 | + }) | ||
84 | + self.assert_qmp(result, 'return', {}) | ||
85 | + | ||
86 | + result = self.vm.qmp('blockdev-add', **{ | ||
87 | + 'node-name': 'target-node', | ||
88 | + 'driver': iotests.imgfmt, | ||
89 | + 'file': { | ||
90 | + 'driver': 'file', | ||
91 | + 'filename': target_img | ||
92 | + } | ||
93 | + }) | ||
94 | + self.assert_qmp(result, 'return', {}) | ||
95 | + | ||
96 | + self.nbd_sock = iotests.file_path('nbd.sock', | ||
97 | + base_dir=iotests.sock_dir) | ||
98 | + self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}' | ||
99 | + | ||
100 | + result = self.vm.qmp('nbd-server-start', addr={ | ||
101 | + 'type': 'unix', | ||
102 | + 'data': { | ||
103 | + 'path': self.nbd_sock | ||
104 | + } | ||
105 | + }) | ||
106 | + self.assert_qmp(result, 'return', {}) | ||
107 | + | ||
108 | + result = self.vm.qmp('block-export-add', id='exp0', type='nbd', | ||
109 | + node_name='source-node', writable=True) | ||
110 | + self.assert_qmp(result, 'return', {}) | ||
111 | + | ||
112 | + def tearDown(self): | ||
113 | + # Wait for background requests to settle | ||
114 | + try: | ||
115 | + while True: | ||
116 | + p = self.background_processes.pop() | ||
117 | + while True: | ||
118 | + try: | ||
119 | + p.wait(timeout=0.0) | ||
120 | + break | ||
121 | + except subprocess.TimeoutExpired: | ||
122 | + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') | ||
123 | + except IndexError: | ||
124 | + pass | ||
125 | + | ||
126 | + # Cancel ongoing block jobs | ||
127 | + for job in self.vm.qmp('query-jobs')['return']: | ||
128 | + self.vm.qmp('block-job-cancel', device=job['id'], force=True) | ||
129 | + | ||
130 | + while True: | ||
131 | + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') | ||
132 | + if len(self.vm.qmp('query-jobs')['return']) == 0: | ||
133 | + break | ||
134 | + | ||
135 | + self.vm.shutdown() | ||
136 | + os.remove(source_img) | ||
137 | + os.remove(target_img) | ||
138 | + | ||
139 | + def testUnderLoad(self): | ||
140 | + ''' | ||
141 | + Throttle the source node, then issue a whole bunch of external requests | ||
142 | + while the mirror job (in write-blocking mode) is running. We want to | ||
143 | + see background requests being issued even while the source is under | ||
144 | + full load by active writes, so that progress can be made towards READY. | ||
145 | + ''' | ||
146 | + | ||
147 | + # Fill the first half of the source image; do not fill the second half, | ||
148 | + # that is where we will have active requests occur. This ensures that | ||
149 | + # active mirroring itself will not directly contribute to the job's | ||
150 | + # progress (because when the job was started, those areas were not | ||
151 | + # intended to be copied, so active mirroring will only lead to not | ||
152 | + # losing progress, but also not making any). | ||
153 | + self.vm.hmp_qemu_io('source-node', | ||
154 | + f'aio_write -P 1 0 {self.image_len // 2}') | ||
155 | + self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}') | ||
156 | + | ||
157 | + # Launch the mirror job | ||
158 | + mirror_buf_size = 65536 | ||
159 | + result = self.vm.qmp('blockdev-mirror', | ||
160 | + job_id='mirror', | ||
161 | + filter_node_name='mirror-node', | ||
162 | + device='source-node', | ||
163 | + target='target-node', | ||
164 | + sync='full', | ||
165 | + copy_mode='write-blocking', | ||
166 | + buf_size=mirror_buf_size) | ||
167 | + self.assert_qmp(result, 'return', {}) | ||
168 | + | ||
169 | + # We create the external requests via qemu-io processes on the NBD | ||
170 | + # server. Have their offset start in the middle of the image so they | ||
171 | + # do not overlap with the background requests (which start from the | ||
172 | + # beginning). | ||
173 | + active_request_offset = self.image_len // 2 | ||
174 | + active_request_len = 4096 | ||
175 | + | ||
176 | + # Create enough requests to saturate the node for 5 seconds | ||
177 | + for _ in range(0, 5 * self.iops): | ||
178 | + req = f'write -P 42 {active_request_offset} {active_request_len}' | ||
179 | + active_request_offset += active_request_len | ||
180 | + p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req) | ||
181 | + self.background_processes += [p] | ||
182 | + | ||
183 | + # Now advance the clock one I/O operation at a time by the 4 seconds | ||
184 | + # (i.e. one less than 5). We expect the mirror job to issue background | ||
185 | + # operations here, even though active requests are still in flight. | ||
186 | + # The active requests will take precedence, however, because they have | ||
187 | + # been issued earlier than mirror's background requests. | ||
188 | + # Once the active requests we have started above are done (i.e. after 5 | ||
189 | + # virtual seconds), we expect those background requests to be worked | ||
190 | + # on. We only advance 4 seconds here to avoid race conditions. | ||
191 | + for _ in range(0, 4 * self.iops): | ||
192 | + step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops) | ||
193 | + self.vm.qtest(f'clock_step {step}') | ||
194 | + | ||
195 | + # Note how much remains to be done until the mirror job is finished | ||
196 | + job_status = self.vm.qmp('query-jobs')['return'][0] | ||
197 | + start_remaining = job_status['total-progress'] - \ | ||
198 | + job_status['current-progress'] | ||
199 | + | ||
200 | + # Create a whole bunch of more active requests | ||
201 | + for _ in range(0, 10 * self.iops): | ||
202 | + req = f'write -P 42 {active_request_offset} {active_request_len}' | ||
203 | + active_request_offset += active_request_len | ||
204 | + p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req) | ||
205 | + self.background_processes += [p] | ||
206 | + | ||
207 | + # Let the clock advance more. After 1 second, as noted above, we | ||
208 | + # expect the background requests to be worked on. Give them a couple | ||
209 | + # of seconds (specifically 4) to see their impact. | ||
210 | + for _ in range(0, 5 * self.iops): | ||
211 | + step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops) | ||
212 | + self.vm.qtest(f'clock_step {step}') | ||
213 | + | ||
214 | + # Note how much remains to be done now. We expect this number to be | ||
215 | + # reduced thanks to those background requests. | ||
216 | + job_status = self.vm.qmp('query-jobs')['return'][0] | ||
217 | + end_remaining = job_status['total-progress'] - \ | ||
218 | + job_status['current-progress'] | ||
219 | + | ||
220 | + # See that indeed progress was being made on the job, even while the | ||
221 | + # node was saturated with active requests | ||
222 | + self.assertGreater(start_remaining - end_remaining, 0) | ||
223 | + | ||
224 | + | ||
225 | if __name__ == '__main__': | ||
226 | iotests.main(supported_fmts=['qcow2', 'raw'], | ||
227 | supported_protocols=['file']) | ||
228 | diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out | ||
229 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
230 | --- a/tests/qemu-iotests/151.out | 17 | --- a/block/dmg.c |
231 | +++ b/tests/qemu-iotests/151.out | 18 | +++ b/block/dmg.c |
232 | @@ -XXX,XX +XXX,XX @@ | 19 | @@ -XXX,XX +XXX,XX @@ err: |
233 | -.... | 20 | return s->n_chunks; /* error */ |
234 | +..... | 21 | } |
235 | ---------------------------------------------------------------------- | 22 | |
236 | -Ran 4 tests | 23 | -static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) |
237 | +Ran 5 tests | 24 | +static int coroutine_fn GRAPH_RDLOCK |
238 | 25 | +dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) | |
239 | OK | 26 | { |
27 | BDRVDMGState *s = bs->opaque; | ||
28 | |||
29 | @@ -XXX,XX +XXX,XX @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) | ||
30 | case UDZO: { /* zlib compressed */ | ||
31 | /* we need to buffer, because only the chunk as whole can be | ||
32 | * inflated. */ | ||
33 | - ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
34 | - s->compressed_chunk, 0); | ||
35 | + ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
36 | + s->compressed_chunk, 0); | ||
37 | if (ret < 0) { | ||
38 | return -1; | ||
39 | } | ||
40 | @@ -XXX,XX +XXX,XX @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) | ||
41 | } | ||
42 | /* we need to buffer, because only the chunk as whole can be | ||
43 | * inflated. */ | ||
44 | - ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
45 | - s->compressed_chunk, 0); | ||
46 | + ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
47 | + s->compressed_chunk, 0); | ||
48 | if (ret < 0) { | ||
49 | return -1; | ||
50 | } | ||
51 | @@ -XXX,XX +XXX,XX @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) | ||
52 | } | ||
53 | /* we need to buffer, because only the chunk as whole can be | ||
54 | * inflated. */ | ||
55 | - ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
56 | - s->compressed_chunk, 0); | ||
57 | + ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
58 | + s->compressed_chunk, 0); | ||
59 | if (ret < 0) { | ||
60 | return -1; | ||
61 | } | ||
62 | @@ -XXX,XX +XXX,XX @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) | ||
63 | } | ||
64 | break; | ||
65 | case UDRW: /* copy */ | ||
66 | - ret = bdrv_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
67 | - s->uncompressed_chunk, 0); | ||
68 | + ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk], | ||
69 | + s->uncompressed_chunk, 0); | ||
70 | if (ret < 0) { | ||
71 | return -1; | ||
72 | } | ||
73 | @@ -XXX,XX +XXX,XX @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num) | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | -static int coroutine_fn | ||
78 | +static int coroutine_fn GRAPH_RDLOCK | ||
79 | dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
80 | QEMUIOVector *qiov, BdrvRequestFlags flags) | ||
81 | { | ||
240 | -- | 82 | -- |
241 | 2.38.1 | 83 | 2.41.0 |
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | There is a small gap in mirror_start_job() before putting the mirror | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | filter node into the block graph (bdrv_append() call) and the actual job | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | being created. Before the job is created, MirrorBDSOpaque.job is NULL. | 5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. |
6 | 6 | ||
7 | It is possible that requests come in when bdrv_drained_end() is called, | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
8 | and those requests would see MirrorBDSOpaque.job == NULL. Have our | 8 | Message-ID: <20230601115145.196465-9-pbonzini@redhat.com> |
9 | filter node handle that case gracefully. | ||
10 | |||
11 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | ||
12 | Message-Id: <20221109165452.67927-4-hreitz@redhat.com> | ||
13 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
14 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
15 | --- | 11 | --- |
16 | block/mirror.c | 20 ++++++++++++-------- | 12 | block/vmdk.c | 27 ++++++++++++++------------- |
17 | 1 file changed, 12 insertions(+), 8 deletions(-) | 13 | 1 file changed, 14 insertions(+), 13 deletions(-) |
18 | 14 | ||
19 | diff --git a/block/mirror.c b/block/mirror.c | 15 | diff --git a/block/vmdk.c b/block/vmdk.c |
20 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/block/mirror.c | 17 | --- a/block/vmdk.c |
22 | +++ b/block/mirror.c | 18 | +++ b/block/vmdk.c |
23 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, | 19 | @@ -XXX,XX +XXX,XX @@ out: |
24 | MirrorOp *op = NULL; | 20 | return ret; |
25 | MirrorBDSOpaque *s = bs->opaque; | 21 | } |
26 | int ret = 0; | 22 | |
27 | - bool copy_to_target; | 23 | -static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid) |
28 | + bool copy_to_target = false; | 24 | +static int coroutine_fn GRAPH_RDLOCK |
29 | 25 | +vmdk_write_cid(BlockDriverState *bs, uint32_t cid) | |
30 | - copy_to_target = s->job->ret >= 0 && | 26 | { |
31 | - !job_is_cancelled(&s->job->common.job) && | 27 | char *desc, *tmp_desc; |
32 | - s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; | 28 | char *p_name, *tmp_str; |
33 | + if (s->job) { | 29 | @@ -XXX,XX +XXX,XX @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid) |
34 | + copy_to_target = s->job->ret >= 0 && | 30 | |
35 | + !job_is_cancelled(&s->job->common.job) && | 31 | desc = g_malloc0(DESC_SIZE); |
36 | + s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; | 32 | tmp_desc = g_malloc0(DESC_SIZE); |
37 | + } | 33 | - ret = bdrv_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0); |
38 | 34 | + ret = bdrv_co_pread(bs->file, s->desc_offset, DESC_SIZE, desc, 0); | |
39 | if (copy_to_target) { | 35 | if (ret < 0) { |
40 | op = active_write_prepare(s->job, offset, bytes); | 36 | goto out; |
41 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, | 37 | } |
42 | QEMUIOVector bounce_qiov; | 38 | @@ -XXX,XX +XXX,XX @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid) |
43 | void *bounce_buf; | 39 | pstrcat(desc, DESC_SIZE, tmp_desc); |
44 | int ret = 0; | 40 | } |
45 | - bool copy_to_target; | 41 | |
46 | + bool copy_to_target = false; | 42 | - ret = bdrv_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0); |
47 | 43 | + ret = bdrv_co_pwrite_sync(bs->file, s->desc_offset, DESC_SIZE, desc, 0); | |
48 | - copy_to_target = s->job->ret >= 0 && | 44 | |
49 | - !job_is_cancelled(&s->job->common.job) && | 45 | out: |
50 | - s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; | 46 | g_free(desc); |
51 | + if (s->job) { | 47 | @@ -XXX,XX +XXX,XX @@ vmdk_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, |
52 | + copy_to_target = s->job->ret >= 0 && | 48 | return ret; |
53 | + !job_is_cancelled(&s->job->common.job) && | 49 | } |
54 | + s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; | 50 | |
55 | + } | 51 | -static int GRAPH_UNLOCKED |
56 | 52 | +static int coroutine_fn GRAPH_UNLOCKED | |
57 | if (copy_to_target) { | 53 | vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress, |
58 | /* The guest might concurrently modify the data to write; but | 54 | bool zeroed_grain, Error **errp) |
55 | { | ||
56 | @@ -XXX,XX +XXX,XX @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress, | ||
57 | int gd_buf_size; | ||
58 | |||
59 | if (flat) { | ||
60 | - ret = blk_truncate(blk, filesize, false, PREALLOC_MODE_OFF, 0, errp); | ||
61 | + ret = blk_co_truncate(blk, filesize, false, PREALLOC_MODE_OFF, 0, errp); | ||
62 | goto exit; | ||
63 | } | ||
64 | magic = cpu_to_be32(VMDK4_MAGIC); | ||
65 | @@ -XXX,XX +XXX,XX @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress, | ||
66 | header.check_bytes[3] = 0xa; | ||
67 | |||
68 | /* write all the data */ | ||
69 | - ret = blk_pwrite(blk, 0, sizeof(magic), &magic, 0); | ||
70 | + ret = blk_co_pwrite(blk, 0, sizeof(magic), &magic, 0); | ||
71 | if (ret < 0) { | ||
72 | error_setg(errp, QERR_IO_ERROR); | ||
73 | goto exit; | ||
74 | } | ||
75 | - ret = blk_pwrite(blk, sizeof(magic), sizeof(header), &header, 0); | ||
76 | + ret = blk_co_pwrite(blk, sizeof(magic), sizeof(header), &header, 0); | ||
77 | if (ret < 0) { | ||
78 | error_setg(errp, QERR_IO_ERROR); | ||
79 | goto exit; | ||
80 | } | ||
81 | |||
82 | - ret = blk_truncate(blk, le64_to_cpu(header.grain_offset) << 9, false, | ||
83 | - PREALLOC_MODE_OFF, 0, errp); | ||
84 | + ret = blk_co_truncate(blk, le64_to_cpu(header.grain_offset) << 9, false, | ||
85 | + PREALLOC_MODE_OFF, 0, errp); | ||
86 | if (ret < 0) { | ||
87 | goto exit; | ||
88 | } | ||
89 | @@ -XXX,XX +XXX,XX @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress, | ||
90 | i < gt_count; i++, tmp += gt_size) { | ||
91 | gd_buf[i] = cpu_to_le32(tmp); | ||
92 | } | ||
93 | - ret = blk_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE, | ||
94 | - gd_buf_size, gd_buf, 0); | ||
95 | + ret = blk_co_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE, | ||
96 | + gd_buf_size, gd_buf, 0); | ||
97 | if (ret < 0) { | ||
98 | error_setg(errp, QERR_IO_ERROR); | ||
99 | goto exit; | ||
100 | @@ -XXX,XX +XXX,XX @@ vmdk_init_extent(BlockBackend *blk, int64_t filesize, bool flat, bool compress, | ||
101 | i < gt_count; i++, tmp += gt_size) { | ||
102 | gd_buf[i] = cpu_to_le32(tmp); | ||
103 | } | ||
104 | - ret = blk_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE, | ||
105 | - gd_buf_size, gd_buf, 0); | ||
106 | + ret = blk_co_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE, | ||
107 | + gd_buf_size, gd_buf, 0); | ||
108 | if (ret < 0) { | ||
109 | error_setg(errp, QERR_IO_ERROR); | ||
110 | } | ||
59 | -- | 111 | -- |
60 | 2.38.1 | 112 | 2.41.0 |
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | mirror_wait_for_free_in_flight_slot() is the only remaining user of | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | mirror_wait_for_any_operation(), so inline the latter into the former. | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. | ||
5 | 6 | ||
6 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
7 | Message-Id: <20221109165452.67927-3-hreitz@redhat.com> | 8 | Message-ID: <20230601115145.196465-10-pbonzini@redhat.com> |
8 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
9 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
10 | --- | 11 | --- |
11 | block/mirror.c | 21 ++++++++------------- | 12 | block/vhdx.h | 5 ++-- |
12 | 1 file changed, 8 insertions(+), 13 deletions(-) | 13 | block/vhdx-log.c | 36 +++++++++++++----------- |
14 | block/vhdx.c | 73 +++++++++++++++++++++++------------------------- | ||
15 | 3 files changed, 57 insertions(+), 57 deletions(-) | ||
13 | 16 | ||
14 | diff --git a/block/mirror.c b/block/mirror.c | 17 | diff --git a/block/vhdx.h b/block/vhdx.h |
15 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/block/mirror.c | 19 | --- a/block/vhdx.h |
17 | +++ b/block/mirror.c | 20 | +++ b/block/vhdx.h |
18 | @@ -XXX,XX +XXX,XX @@ static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset, | 21 | @@ -XXX,XX +XXX,XX @@ bool vhdx_checksum_is_valid(uint8_t *buf, size_t size, int crc_offset); |
22 | int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed, | ||
23 | Error **errp); | ||
24 | |||
25 | -int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, | ||
26 | - void *data, uint32_t length, uint64_t offset); | ||
27 | +int coroutine_fn GRAPH_RDLOCK | ||
28 | +vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, | ||
29 | + void *data, uint32_t length, uint64_t offset); | ||
30 | |||
31 | static inline void leguid_to_cpus(MSGUID *guid) | ||
32 | { | ||
33 | diff --git a/block/vhdx-log.c b/block/vhdx-log.c | ||
34 | index XXXXXXX..XXXXXXX 100644 | ||
35 | --- a/block/vhdx-log.c | ||
36 | +++ b/block/vhdx-log.c | ||
37 | @@ -XXX,XX +XXX,XX @@ exit: | ||
38 | * It is assumed that 'buffer' is at least 4096*num_sectors large. | ||
39 | * | ||
40 | * 0 is returned on success, -errno otherwise */ | ||
41 | -static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log, | ||
42 | - uint32_t *sectors_written, void *buffer, | ||
43 | - uint32_t num_sectors) | ||
44 | +static int coroutine_fn GRAPH_RDLOCK | ||
45 | +vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log, | ||
46 | + uint32_t *sectors_written, void *buffer, | ||
47 | + uint32_t num_sectors) | ||
48 | { | ||
49 | int ret = 0; | ||
50 | uint64_t offset; | ||
51 | @@ -XXX,XX +XXX,XX @@ static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log, | ||
52 | /* full */ | ||
53 | break; | ||
54 | } | ||
55 | - ret = bdrv_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp, | ||
56 | - 0); | ||
57 | + ret = bdrv_co_pwrite(bs->file, offset, VHDX_LOG_SECTOR_SIZE, buffer_tmp, 0); | ||
58 | if (ret < 0) { | ||
59 | goto exit; | ||
60 | } | ||
61 | @@ -XXX,XX +XXX,XX @@ static void vhdx_log_raw_to_le_sector(VHDXLogDescriptor *desc, | ||
19 | } | 62 | } |
20 | 63 | ||
21 | static inline void coroutine_fn | 64 | |
22 | -mirror_wait_for_any_operation(MirrorBlockJob *s, bool active) | 65 | -static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, |
23 | +mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) | 66 | - void *data, uint32_t length, uint64_t offset) |
24 | { | 67 | +static int coroutine_fn GRAPH_RDLOCK |
25 | MirrorOp *op; | 68 | +vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, |
26 | 69 | + void *data, uint32_t length, uint64_t offset) | |
27 | QTAILQ_FOREACH(op, &s->ops_in_flight, next) { | 70 | { |
28 | - /* Do not wait on pseudo ops, because it may in turn wait on | 71 | int ret = 0; |
29 | + /* | 72 | void *buffer = NULL; |
30 | + * Do not wait on pseudo ops, because it may in turn wait on | 73 | @@ -XXX,XX +XXX,XX @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, |
31 | * some other operation to start, which may in fact be the | 74 | |
32 | * caller of this function. Since there is only one pseudo op | 75 | sectors += partial_sectors; |
33 | * at any given time, we will always find some real operation | 76 | |
34 | - * to wait on. */ | 77 | - file_length = bdrv_getlength(bs->file->bs); |
35 | - if (!op->is_pseudo_op && op->is_in_flight && | 78 | + file_length = bdrv_co_getlength(bs->file->bs); |
36 | - op->is_active_write == active) | 79 | if (file_length < 0) { |
37 | - { | 80 | ret = file_length; |
38 | + * to wait on. | 81 | goto exit; |
39 | + * Also, do not wait on active operations, because they do not | 82 | @@ -XXX,XX +XXX,XX @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, |
40 | + * use up in-flight slots. | 83 | |
41 | + */ | 84 | if (i == 0 && leading_length) { |
42 | + if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) { | 85 | /* partial sector at the front of the buffer */ |
43 | qemu_co_queue_wait(&op->waiting_requests, NULL); | 86 | - ret = bdrv_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE, |
44 | return; | 87 | - merged_sector, 0); |
45 | } | 88 | + ret = bdrv_co_pread(bs->file, file_offset, VHDX_LOG_SECTOR_SIZE, |
46 | @@ -XXX,XX +XXX,XX @@ mirror_wait_for_any_operation(MirrorBlockJob *s, bool active) | 89 | + merged_sector, 0); |
47 | abort(); | 90 | if (ret < 0) { |
91 | goto exit; | ||
92 | } | ||
93 | @@ -XXX,XX +XXX,XX @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s, | ||
94 | sector_write = merged_sector; | ||
95 | } else if (i == sectors - 1 && trailing_length) { | ||
96 | /* partial sector at the end of the buffer */ | ||
97 | - ret = bdrv_pread(bs->file, file_offset + trailing_length, | ||
98 | - VHDX_LOG_SECTOR_SIZE - trailing_length, | ||
99 | - merged_sector + trailing_length, 0); | ||
100 | + ret = bdrv_co_pread(bs->file, file_offset + trailing_length, | ||
101 | + VHDX_LOG_SECTOR_SIZE - trailing_length, | ||
102 | + merged_sector + trailing_length, 0); | ||
103 | if (ret < 0) { | ||
104 | goto exit; | ||
105 | } | ||
106 | @@ -XXX,XX +XXX,XX @@ exit: | ||
48 | } | 107 | } |
49 | 108 | ||
50 | -static inline void coroutine_fn | 109 | /* Perform a log write, and then immediately flush the entire log */ |
51 | -mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s) | 110 | -int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, |
52 | -{ | 111 | - void *data, uint32_t length, uint64_t offset) |
53 | - /* Only non-active operations use up in-flight slots */ | 112 | +int coroutine_fn |
54 | - mirror_wait_for_any_operation(s, false); | 113 | +vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, |
55 | -} | 114 | + void *data, uint32_t length, uint64_t offset) |
56 | - | 115 | { |
57 | /* Perform a mirror copy operation. | 116 | int ret = 0; |
117 | VHDXLogSequence logs = { .valid = true, | ||
118 | @@ -XXX,XX +XXX,XX @@ int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, | ||
119 | |||
120 | /* Make sure data written (new and/or changed blocks) is stable | ||
121 | * on disk, before creating log entry */ | ||
122 | - ret = bdrv_flush(bs); | ||
123 | + ret = bdrv_co_flush(bs); | ||
124 | if (ret < 0) { | ||
125 | goto exit; | ||
126 | } | ||
127 | @@ -XXX,XX +XXX,XX @@ int vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s, | ||
128 | logs.log = s->log; | ||
129 | |||
130 | /* Make sure log is stable on disk */ | ||
131 | - ret = bdrv_flush(bs); | ||
132 | + ret = bdrv_co_flush(bs); | ||
133 | if (ret < 0) { | ||
134 | goto exit; | ||
135 | } | ||
136 | diff --git a/block/vhdx.c b/block/vhdx.c | ||
137 | index XXXXXXX..XXXXXXX 100644 | ||
138 | --- a/block/vhdx.c | ||
139 | +++ b/block/vhdx.c | ||
140 | @@ -XXX,XX +XXX,XX @@ exit: | ||
58 | * | 141 | * |
59 | * *op->bytes_handled is set to the number of bytes copied after and | 142 | * Returns the file offset start of the new payload block |
143 | */ | ||
144 | -static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s, | ||
145 | - uint64_t *new_offset, bool *need_zero) | ||
146 | +static int coroutine_fn GRAPH_RDLOCK | ||
147 | +vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s, | ||
148 | + uint64_t *new_offset, bool *need_zero) | ||
149 | { | ||
150 | int64_t current_len; | ||
151 | |||
152 | - current_len = bdrv_getlength(bs->file->bs); | ||
153 | + current_len = bdrv_co_getlength(bs->file->bs); | ||
154 | if (current_len < 0) { | ||
155 | return current_len; | ||
156 | } | ||
157 | @@ -XXX,XX +XXX,XX @@ static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s, | ||
158 | if (*need_zero) { | ||
159 | int ret; | ||
160 | |||
161 | - ret = bdrv_truncate(bs->file, *new_offset + s->block_size, false, | ||
162 | - PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, NULL); | ||
163 | + ret = bdrv_co_truncate(bs->file, *new_offset + s->block_size, false, | ||
164 | + PREALLOC_MODE_OFF, BDRV_REQ_ZERO_WRITE, NULL); | ||
165 | if (ret != -ENOTSUP) { | ||
166 | *need_zero = false; | ||
167 | return ret; | ||
168 | } | ||
169 | } | ||
170 | |||
171 | - return bdrv_truncate(bs->file, *new_offset + s->block_size, false, | ||
172 | - PREALLOC_MODE_OFF, 0, NULL); | ||
173 | + return bdrv_co_truncate(bs->file, *new_offset + s->block_size, false, | ||
174 | + PREALLOC_MODE_OFF, 0, NULL); | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | @@ -XXX,XX +XXX,XX @@ exit: | ||
179 | * The first 64KB of the Metadata section is reserved for the metadata | ||
180 | * header and entries; beyond that, the metadata items themselves reside. | ||
181 | */ | ||
182 | -static int vhdx_create_new_metadata(BlockBackend *blk, | ||
183 | - uint64_t image_size, | ||
184 | - uint32_t block_size, | ||
185 | - uint32_t sector_size, | ||
186 | - uint64_t metadata_offset, | ||
187 | - VHDXImageType type) | ||
188 | +static int coroutine_fn | ||
189 | +vhdx_create_new_metadata(BlockBackend *blk, uint64_t image_size, | ||
190 | + uint32_t block_size, uint32_t sector_size, | ||
191 | + uint64_t metadata_offset, VHDXImageType type) | ||
192 | { | ||
193 | int ret = 0; | ||
194 | uint32_t offset = 0; | ||
195 | @@ -XXX,XX +XXX,XX @@ static int vhdx_create_new_metadata(BlockBackend *blk, | ||
196 | VHDX_META_FLAGS_IS_VIRTUAL_DISK; | ||
197 | vhdx_metadata_entry_le_export(&md_table_entry[4]); | ||
198 | |||
199 | - ret = blk_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0); | ||
200 | + ret = blk_co_pwrite(blk, metadata_offset, VHDX_HEADER_BLOCK_SIZE, buffer, 0); | ||
201 | if (ret < 0) { | ||
202 | goto exit; | ||
203 | } | ||
204 | |||
205 | - ret = blk_pwrite(blk, metadata_offset + (64 * KiB), | ||
206 | - VHDX_METADATA_ENTRY_BUFFER_SIZE, entry_buffer, 0); | ||
207 | + ret = blk_co_pwrite(blk, metadata_offset + (64 * KiB), | ||
208 | + VHDX_METADATA_ENTRY_BUFFER_SIZE, entry_buffer, 0); | ||
209 | if (ret < 0) { | ||
210 | goto exit; | ||
211 | } | ||
212 | @@ -XXX,XX +XXX,XX @@ exit: | ||
213 | * Fixed images: default state of the BAT is fully populated, with | ||
214 | * file offsets and state PAYLOAD_BLOCK_FULLY_PRESENT. | ||
215 | */ | ||
216 | -static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s, | ||
217 | - uint64_t image_size, VHDXImageType type, | ||
218 | - bool use_zero_blocks, uint64_t file_offset, | ||
219 | - uint32_t length, Error **errp) | ||
220 | +static int coroutine_fn | ||
221 | +vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s, | ||
222 | + uint64_t image_size, VHDXImageType type, | ||
223 | + bool use_zero_blocks, uint64_t file_offset, | ||
224 | + uint32_t length, Error **errp) | ||
225 | { | ||
226 | int ret = 0; | ||
227 | uint64_t data_file_offset; | ||
228 | @@ -XXX,XX +XXX,XX @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s, | ||
229 | if (type == VHDX_TYPE_DYNAMIC) { | ||
230 | /* All zeroes, so we can just extend the file - the end of the BAT | ||
231 | * is the furthest thing we have written yet */ | ||
232 | - ret = blk_truncate(blk, data_file_offset, false, PREALLOC_MODE_OFF, | ||
233 | - 0, errp); | ||
234 | + ret = blk_co_truncate(blk, data_file_offset, false, PREALLOC_MODE_OFF, | ||
235 | + 0, errp); | ||
236 | if (ret < 0) { | ||
237 | goto exit; | ||
238 | } | ||
239 | } else if (type == VHDX_TYPE_FIXED) { | ||
240 | - ret = blk_truncate(blk, data_file_offset + image_size, false, | ||
241 | - PREALLOC_MODE_OFF, 0, errp); | ||
242 | + ret = blk_co_truncate(blk, data_file_offset + image_size, false, | ||
243 | + PREALLOC_MODE_OFF, 0, errp); | ||
244 | if (ret < 0) { | ||
245 | goto exit; | ||
246 | } | ||
247 | @@ -XXX,XX +XXX,XX @@ static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s, | ||
248 | s->bat[sinfo.bat_idx] = cpu_to_le64(s->bat[sinfo.bat_idx]); | ||
249 | sector_num += s->sectors_per_block; | ||
250 | } | ||
251 | - ret = blk_pwrite(blk, file_offset, length, s->bat, 0); | ||
252 | + ret = blk_co_pwrite(blk, file_offset, length, s->bat, 0); | ||
253 | if (ret < 0) { | ||
254 | error_setg_errno(errp, -ret, "Failed to write the BAT"); | ||
255 | goto exit; | ||
256 | @@ -XXX,XX +XXX,XX @@ exit: | ||
257 | * to create the BAT itself, we will also cause the BAT to be | ||
258 | * created. | ||
259 | */ | ||
260 | -static int vhdx_create_new_region_table(BlockBackend *blk, | ||
261 | - uint64_t image_size, | ||
262 | - uint32_t block_size, | ||
263 | - uint32_t sector_size, | ||
264 | - uint32_t log_size, | ||
265 | - bool use_zero_blocks, | ||
266 | - VHDXImageType type, | ||
267 | - uint64_t *metadata_offset, | ||
268 | - Error **errp) | ||
269 | +static int coroutine_fn | ||
270 | +vhdx_create_new_region_table(BlockBackend *blk, uint64_t image_size, | ||
271 | + uint32_t block_size, uint32_t sector_size, | ||
272 | + uint32_t log_size, bool use_zero_blocks, | ||
273 | + VHDXImageType type, uint64_t *metadata_offset, | ||
274 | + Error **errp) | ||
275 | { | ||
276 | int ret = 0; | ||
277 | uint32_t offset = 0; | ||
278 | @@ -XXX,XX +XXX,XX @@ static int vhdx_create_new_region_table(BlockBackend *blk, | ||
279 | } | ||
280 | |||
281 | /* Now write out the region headers to disk */ | ||
282 | - ret = blk_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE, | ||
283 | - buffer, 0); | ||
284 | + ret = blk_co_pwrite(blk, VHDX_REGION_TABLE_OFFSET, VHDX_HEADER_BLOCK_SIZE, | ||
285 | + buffer, 0); | ||
286 | if (ret < 0) { | ||
287 | error_setg_errno(errp, -ret, "Failed to write first region table"); | ||
288 | goto exit; | ||
289 | } | ||
290 | |||
291 | - ret = blk_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE, | ||
292 | - buffer, 0); | ||
293 | + ret = blk_co_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, VHDX_HEADER_BLOCK_SIZE, | ||
294 | + buffer, 0); | ||
295 | if (ret < 0) { | ||
296 | error_setg_errno(errp, -ret, "Failed to write second region table"); | ||
297 | goto exit; | ||
60 | -- | 298 | -- |
61 | 2.38.1 | 299 | 2.41.0 |
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | blk_get_aio_context() asserts that blk->ctx is always equal to the root | 3 | Mark functions as coroutine_fn when they are only called by other coroutine_fns |
4 | BDS's context (if there is a root BDS). Therefore, | 4 | and they can suspend. Change calls to co_wrappers to use the non-wrapped |
5 | blk_do_set_aio_context() must update blk->ctx immediately after the root | 5 | functions, which in turn requires adding GRAPH_RDLOCK annotations. |
6 | BDS's context has changed. | ||
7 | 6 | ||
8 | Without this patch, the next patch would break iotest 238, because | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | bdrv_drained_begin() (called by blk_do_set_aio_context()) may then | 8 | Message-ID: <20230601115145.196465-11-pbonzini@redhat.com> |
10 | invoke bdrv_child_get_parent_aio_context() on the root child, i.e. | ||
11 | blk_get_aio_context(). However, by this point, blk->ctx would not have | ||
12 | been updated and thus differ from the root node's context. This patch | ||
13 | fixes that. | ||
14 | |||
15 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
16 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | ||
17 | Message-Id: <20221107151321.211175-3-hreitz@redhat.com> | ||
18 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
19 | --- | 11 | --- |
20 | block/block-backend.c | 8 +++++++- | 12 | block/qcow2.h | 33 +++++------ |
21 | 1 file changed, 7 insertions(+), 1 deletion(-) | 13 | block/qcow2-bitmap.c | 26 +++++---- |
14 | block/qcow2-cluster.c | 12 ++-- | ||
15 | block/qcow2-refcount.c | 130 +++++++++++++++++++++-------------------- | ||
16 | block/qcow2.c | 2 +- | ||
17 | 5 files changed, 105 insertions(+), 98 deletions(-) | ||
22 | 18 | ||
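The qcow2 conversion described in the message above follows one pattern throughout: a helper that is only reached from coroutine context gets the coroutine_fn marker (plus GRAPH_RDLOCK where it touches graph-protected children), and its calls switch from the generated co_wrapper entry points to the native _co_ variants. A minimal sketch of that pattern, with a made-up helper name not taken from the series:

    /* Only called from other coroutine_fns; reads through bs->file, so the
     * caller must hold the block graph read lock. */
    static int coroutine_fn GRAPH_RDLOCK
    example_load_table(BlockDriverState *bs, void *buf, uint64_t bytes)
    {
        /* In coroutine context, call the _co_ variant directly instead of
         * the generated co_wrapper. */
        return bdrv_co_pread(bs->file, 0, bytes, buf, 0);
    }

Callers either already hold the read lock or carry a GRAPH_RDLOCK annotation themselves, which is what most of the hunks below add.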
23 | diff --git a/block/block-backend.c b/block/block-backend.c | 19 | diff --git a/block/qcow2.h b/block/qcow2.h |
24 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/block/block-backend.c | 21 | --- a/block/qcow2.h |
26 | +++ b/block/block-backend.c | 22 | +++ b/block/qcow2.h |
27 | @@ -XXX,XX +XXX,XX @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, | 23 | @@ -XXX,XX +XXX,XX @@ int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, |
28 | return ret; | 24 | |
25 | int qcow2_mark_dirty(BlockDriverState *bs); | ||
26 | int qcow2_mark_corrupt(BlockDriverState *bs); | ||
27 | -int qcow2_mark_consistent(BlockDriverState *bs); | ||
28 | int qcow2_update_header(BlockDriverState *bs); | ||
29 | |||
30 | void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, | ||
31 | @@ -XXX,XX +XXX,XX @@ int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset, | ||
32 | int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size); | ||
33 | int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, | ||
34 | int64_t nb_clusters); | ||
35 | -int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size); | ||
36 | +int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size); | ||
37 | void qcow2_free_clusters(BlockDriverState *bs, | ||
38 | int64_t offset, int64_t size, | ||
39 | enum qcow2_discard_type type); | ||
40 | @@ -XXX,XX +XXX,XX @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs, | ||
41 | |||
42 | int qcow2_flush_caches(BlockDriverState *bs); | ||
43 | int qcow2_write_caches(BlockDriverState *bs); | ||
44 | -int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
45 | - BdrvCheckMode fix); | ||
46 | +int coroutine_fn qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
47 | + BdrvCheckMode fix); | ||
48 | |||
49 | void qcow2_process_discards(BlockDriverState *bs, int ret); | ||
50 | |||
51 | @@ -XXX,XX +XXX,XX @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset, | ||
52 | int64_t size); | ||
53 | int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset, | ||
54 | int64_t size, bool data_file); | ||
55 | -int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res, | ||
56 | - void **refcount_table, | ||
57 | - int64_t *refcount_table_size, | ||
58 | - int64_t offset, int64_t size); | ||
59 | +int coroutine_fn qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res, | ||
60 | + void **refcount_table, | ||
61 | + int64_t *refcount_table_size, | ||
62 | + int64_t offset, int64_t size); | ||
63 | |||
64 | int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order, | ||
65 | BlockDriverAmendStatusCB *status_cb, | ||
66 | @@ -XXX,XX +XXX,XX @@ int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, | ||
67 | int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, | ||
68 | unsigned int *bytes, | ||
69 | uint64_t *host_offset, QCowL2Meta **m); | ||
70 | -int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, | ||
71 | - uint64_t offset, | ||
72 | - int compressed_size, | ||
73 | - uint64_t *host_offset); | ||
74 | +int coroutine_fn GRAPH_RDLOCK | ||
75 | +qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset, | ||
76 | + int compressed_size, uint64_t *host_offset); | ||
77 | void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry, | ||
78 | uint64_t *coffset, int *csize); | ||
79 | |||
80 | @@ -XXX,XX +XXX,XX @@ void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset); | ||
81 | void qcow2_cache_discard(Qcow2Cache *c, void *table); | ||
82 | |||
83 | /* qcow2-bitmap.c functions */ | ||
84 | -int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
85 | - void **refcount_table, | ||
86 | - int64_t *refcount_table_size); | ||
87 | -bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs, | ||
88 | - bool *header_updated, Error **errp); | ||
89 | +int coroutine_fn | ||
90 | +qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
91 | + void **refcount_table, | ||
92 | + int64_t *refcount_table_size); | ||
93 | +bool coroutine_fn GRAPH_RDLOCK | ||
94 | +qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, Error **errp); | ||
95 | bool qcow2_get_bitmap_info_list(BlockDriverState *bs, | ||
96 | Qcow2BitmapInfoList **info_list, Error **errp); | ||
97 | int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp); | ||
98 | diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c | ||
99 | index XXXXXXX..XXXXXXX 100644 | ||
100 | --- a/block/qcow2-bitmap.c | ||
101 | +++ b/block/qcow2-bitmap.c | ||
102 | @@ -XXX,XX +XXX,XX @@ static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb) | ||
103 | /* load_bitmap_data | ||
104 | * @bitmap_table entries must satisfy specification constraints. | ||
105 | * @bitmap must be cleared */ | ||
106 | -static int load_bitmap_data(BlockDriverState *bs, | ||
107 | - const uint64_t *bitmap_table, | ||
108 | - uint32_t bitmap_table_size, | ||
109 | - BdrvDirtyBitmap *bitmap) | ||
110 | +static int coroutine_fn GRAPH_RDLOCK | ||
111 | +load_bitmap_data(BlockDriverState *bs, const uint64_t *bitmap_table, | ||
112 | + uint32_t bitmap_table_size, BdrvDirtyBitmap *bitmap) | ||
113 | { | ||
114 | int ret = 0; | ||
115 | BDRVQcow2State *s = bs->opaque; | ||
116 | @@ -XXX,XX +XXX,XX @@ static int load_bitmap_data(BlockDriverState *bs, | ||
117 | * already cleared */ | ||
118 | } | ||
119 | } else { | ||
120 | - ret = bdrv_pread(bs->file, data_offset, s->cluster_size, buf, 0); | ||
121 | + ret = bdrv_co_pread(bs->file, data_offset, s->cluster_size, buf, 0); | ||
122 | if (ret < 0) { | ||
123 | goto finish; | ||
124 | } | ||
125 | @@ -XXX,XX +XXX,XX @@ finish: | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | -static BdrvDirtyBitmap *load_bitmap(BlockDriverState *bs, | ||
130 | - Qcow2Bitmap *bm, Error **errp) | ||
131 | +static coroutine_fn GRAPH_RDLOCK | ||
132 | +BdrvDirtyBitmap *load_bitmap(BlockDriverState *bs, | ||
133 | + Qcow2Bitmap *bm, Error **errp) | ||
134 | { | ||
135 | int ret; | ||
136 | uint64_t *bitmap_table = NULL; | ||
137 | @@ -XXX,XX +XXX,XX @@ fail: | ||
138 | return NULL; | ||
139 | } | ||
140 | |||
141 | -int qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
142 | - void **refcount_table, | ||
143 | - int64_t *refcount_table_size) | ||
144 | +int coroutine_fn | ||
145 | +qcow2_check_bitmaps_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
146 | + void **refcount_table, | ||
147 | + int64_t *refcount_table_size) | ||
148 | { | ||
149 | int ret; | ||
150 | BDRVQcow2State *s = bs->opaque; | ||
151 | @@ -XXX,XX +XXX,XX @@ static void set_readonly_helper(gpointer bitmap, gpointer value) | ||
152 | * If header_updated is not NULL then it is set appropriately regardless of | ||
153 | * the return value. | ||
154 | */ | ||
155 | -bool coroutine_fn qcow2_load_dirty_bitmaps(BlockDriverState *bs, | ||
156 | - bool *header_updated, Error **errp) | ||
157 | +bool coroutine_fn GRAPH_RDLOCK | ||
158 | +qcow2_load_dirty_bitmaps(BlockDriverState *bs, | ||
159 | + bool *header_updated, Error **errp) | ||
160 | { | ||
161 | BDRVQcow2State *s = bs->opaque; | ||
162 | Qcow2BitmapList *bm_list; | ||
163 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c | ||
164 | index XXXXXXX..XXXXXXX 100644 | ||
165 | --- a/block/qcow2-cluster.c | ||
166 | +++ b/block/qcow2-cluster.c | ||
167 | @@ -XXX,XX +XXX,XX @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset, | ||
168 | * | ||
169 | * Return 0 on success and -errno in error cases | ||
170 | */ | ||
171 | -int coroutine_fn qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, | ||
172 | - uint64_t offset, | ||
173 | - int compressed_size, | ||
174 | - uint64_t *host_offset) | ||
175 | +int coroutine_fn GRAPH_RDLOCK | ||
176 | +qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset, | ||
177 | + int compressed_size, uint64_t *host_offset) | ||
178 | { | ||
179 | BDRVQcow2State *s = bs->opaque; | ||
180 | int l2_index, ret; | ||
181 | @@ -XXX,XX +XXX,XX @@ fail: | ||
182 | * all clusters in the same L2 slice) and returns the number of zeroed | ||
183 | * clusters. | ||
184 | */ | ||
185 | -static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, | ||
186 | - uint64_t nb_clusters, int flags) | ||
187 | +static int coroutine_fn | ||
188 | +zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, | ||
189 | + uint64_t nb_clusters, int flags) | ||
190 | { | ||
191 | BDRVQcow2State *s = bs->opaque; | ||
192 | uint64_t *l2_slice; | ||
193 | diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c | ||
194 | index XXXXXXX..XXXXXXX 100644 | ||
195 | --- a/block/qcow2-refcount.c | ||
196 | +++ b/block/qcow2-refcount.c | ||
197 | @@ -XXX,XX +XXX,XX @@ int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offs | ||
198 | |||
199 | /* only used to allocate compressed sectors. We try to allocate | ||
200 | contiguous sectors. size must be <= cluster_size */ | ||
201 | -int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size) | ||
202 | +int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size) | ||
203 | { | ||
204 | BDRVQcow2State *s = bs->opaque; | ||
205 | int64_t offset; | ||
206 | @@ -XXX,XX +XXX,XX @@ static int realloc_refcount_array(BDRVQcow2State *s, void **array, | ||
207 | * | ||
208 | * Modifies the number of errors in res. | ||
209 | */ | ||
210 | -int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res, | ||
211 | - void **refcount_table, | ||
212 | - int64_t *refcount_table_size, | ||
213 | - int64_t offset, int64_t size) | ||
214 | +int coroutine_fn GRAPH_RDLOCK | ||
215 | +qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res, | ||
216 | + void **refcount_table, | ||
217 | + int64_t *refcount_table_size, | ||
218 | + int64_t offset, int64_t size) | ||
219 | { | ||
220 | BDRVQcow2State *s = bs->opaque; | ||
221 | uint64_t start, last, cluster_offset, k, refcount; | ||
222 | @@ -XXX,XX +XXX,XX @@ int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res, | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | - file_len = bdrv_getlength(bs->file->bs); | ||
227 | + file_len = bdrv_co_getlength(bs->file->bs); | ||
228 | if (file_len < 0) { | ||
229 | return file_len; | ||
230 | } | ||
231 | @@ -XXX,XX +XXX,XX @@ enum { | ||
232 | * | ||
233 | * On failure in-memory @l2_table may be modified. | ||
234 | */ | ||
235 | -static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res, | ||
236 | - uint64_t l2_offset, | ||
237 | - uint64_t *l2_table, int l2_index, bool active, | ||
238 | - bool *metadata_overlap) | ||
239 | +static int coroutine_fn GRAPH_RDLOCK | ||
240 | +fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res, | ||
241 | + uint64_t l2_offset, uint64_t *l2_table, | ||
242 | + int l2_index, bool active, | ||
243 | + bool *metadata_overlap) | ||
244 | { | ||
245 | BDRVQcow2State *s = bs->opaque; | ||
246 | int ret; | ||
247 | @@ -XXX,XX +XXX,XX @@ static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res, | ||
248 | goto fail; | ||
249 | } | ||
250 | |||
251 | - ret = bdrv_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s), | ||
252 | - &l2_table[idx], 0); | ||
253 | + ret = bdrv_co_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s), | ||
254 | + &l2_table[idx], 0); | ||
255 | if (ret < 0) { | ||
256 | fprintf(stderr, "ERROR: Failed to overwrite L2 " | ||
257 | "table entry: %s\n", strerror(-ret)); | ||
258 | @@ -XXX,XX +XXX,XX @@ fail: | ||
259 | * Returns the number of errors found by the checks or -errno if an internal | ||
260 | * error occurred. | ||
261 | */ | ||
262 | -static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, | ||
263 | - void **refcount_table, | ||
264 | - int64_t *refcount_table_size, int64_t l2_offset, | ||
265 | - int flags, BdrvCheckMode fix, bool active) | ||
266 | +static int coroutine_fn GRAPH_RDLOCK | ||
267 | +check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, | ||
268 | + void **refcount_table, | ||
269 | + int64_t *refcount_table_size, int64_t l2_offset, | ||
270 | + int flags, BdrvCheckMode fix, bool active) | ||
271 | { | ||
272 | BDRVQcow2State *s = bs->opaque; | ||
273 | uint64_t l2_entry, l2_bitmap; | ||
274 | @@ -XXX,XX +XXX,XX @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, | ||
275 | bool metadata_overlap; | ||
276 | |||
277 | /* Read L2 table from disk */ | ||
278 | - ret = bdrv_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0); | ||
279 | + ret = bdrv_co_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0); | ||
280 | if (ret < 0) { | ||
281 | fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n"); | ||
282 | res->check_errors++; | ||
283 | @@ -XXX,XX +XXX,XX @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, | ||
284 | * Returns the number of errors found by the checks or -errno if an internal | ||
285 | * error occurred. | ||
286 | */ | ||
287 | -static int check_refcounts_l1(BlockDriverState *bs, | ||
288 | - BdrvCheckResult *res, | ||
289 | - void **refcount_table, | ||
290 | - int64_t *refcount_table_size, | ||
291 | - int64_t l1_table_offset, int l1_size, | ||
292 | - int flags, BdrvCheckMode fix, bool active) | ||
293 | +static int coroutine_fn GRAPH_RDLOCK | ||
294 | +check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, | ||
295 | + void **refcount_table, int64_t *refcount_table_size, | ||
296 | + int64_t l1_table_offset, int l1_size, | ||
297 | + int flags, BdrvCheckMode fix, bool active) | ||
298 | { | ||
299 | BDRVQcow2State *s = bs->opaque; | ||
300 | size_t l1_size_bytes = l1_size * L1E_SIZE; | ||
301 | @@ -XXX,XX +XXX,XX @@ static int check_refcounts_l1(BlockDriverState *bs, | ||
302 | } | ||
303 | |||
304 | /* Read L1 table entries from disk */ | ||
305 | - ret = bdrv_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0); | ||
306 | + ret = bdrv_co_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0); | ||
307 | if (ret < 0) { | ||
308 | fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); | ||
309 | res->check_errors++; | ||
310 | @@ -XXX,XX +XXX,XX @@ static int check_refcounts_l1(BlockDriverState *bs, | ||
311 | * have been already detected and sufficiently signaled by the calling function | ||
312 | * (qcow2_check_refcounts) by the time this function is called). | ||
313 | */ | ||
314 | -static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, | ||
315 | - BdrvCheckMode fix) | ||
316 | +static int coroutine_fn GRAPH_RDLOCK | ||
317 | +check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) | ||
318 | { | ||
319 | BDRVQcow2State *s = bs->opaque; | ||
320 | uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size); | ||
321 | @@ -XXX,XX +XXX,XX @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, | ||
29 | } | 322 | } |
30 | } | 323 | } |
31 | + /* | 324 | |
32 | + * Make blk->ctx consistent with the root node before we invoke any | 325 | - ret = bdrv_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s), |
33 | + * other operations like drain that might inquire blk->ctx | 326 | - l2_table, 0); |
34 | + */ | 327 | + ret = bdrv_co_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s), |
35 | + blk->ctx = new_context; | 328 | + l2_table, 0); |
36 | if (tgm->throttle_state) { | 329 | if (ret < 0) { |
37 | bdrv_drained_begin(bs); | 330 | fprintf(stderr, "ERROR: Could not read L2 table: %s\n", |
38 | throttle_group_detach_aio_context(tgm); | 331 | strerror(-ret)); |
39 | @@ -XXX,XX +XXX,XX @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, | 332 | @@ -XXX,XX +XXX,XX @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, |
40 | } | 333 | goto fail; |
41 | 334 | } | |
42 | bdrv_unref(bs); | 335 | |
43 | + } else { | 336 | - ret = bdrv_pwrite(bs->file, l2_offset, s->cluster_size, l2_table, |
44 | + blk->ctx = new_context; | 337 | - 0); |
338 | + ret = bdrv_co_pwrite(bs->file, l2_offset, s->cluster_size, l2_table, 0); | ||
339 | if (ret < 0) { | ||
340 | fprintf(stderr, "ERROR: Could not write L2 table: %s\n", | ||
341 | strerror(-ret)); | ||
342 | @@ -XXX,XX +XXX,XX @@ fail: | ||
343 | * Checks consistency of refblocks and accounts for each refblock in | ||
344 | * *refcount_table. | ||
345 | */ | ||
346 | -static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res, | ||
347 | - BdrvCheckMode fix, bool *rebuild, | ||
348 | - void **refcount_table, int64_t *nb_clusters) | ||
349 | +static int coroutine_fn GRAPH_RDLOCK | ||
350 | +check_refblocks(BlockDriverState *bs, BdrvCheckResult *res, | ||
351 | + BdrvCheckMode fix, bool *rebuild, | ||
352 | + void **refcount_table, int64_t *nb_clusters) | ||
353 | { | ||
354 | BDRVQcow2State *s = bs->opaque; | ||
355 | int64_t i, size; | ||
356 | @@ -XXX,XX +XXX,XX @@ static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res, | ||
357 | goto resize_fail; | ||
358 | } | ||
359 | |||
360 | - ret = bdrv_truncate(bs->file, offset + s->cluster_size, false, | ||
361 | - PREALLOC_MODE_OFF, 0, &local_err); | ||
362 | + ret = bdrv_co_truncate(bs->file, offset + s->cluster_size, false, | ||
363 | + PREALLOC_MODE_OFF, 0, &local_err); | ||
364 | if (ret < 0) { | ||
365 | error_report_err(local_err); | ||
366 | goto resize_fail; | ||
367 | } | ||
368 | - size = bdrv_getlength(bs->file->bs); | ||
369 | + size = bdrv_co_getlength(bs->file->bs); | ||
370 | if (size < 0) { | ||
371 | ret = size; | ||
372 | goto resize_fail; | ||
373 | @@ -XXX,XX +XXX,XX @@ resize_fail: | ||
374 | /* | ||
375 | * Calculates an in-memory refcount table. | ||
376 | */ | ||
377 | -static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
378 | - BdrvCheckMode fix, bool *rebuild, | ||
379 | - void **refcount_table, int64_t *nb_clusters) | ||
380 | +static int coroutine_fn GRAPH_RDLOCK | ||
381 | +calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
382 | + BdrvCheckMode fix, bool *rebuild, | ||
383 | + void **refcount_table, int64_t *nb_clusters) | ||
384 | { | ||
385 | BDRVQcow2State *s = bs->opaque; | ||
386 | int64_t i; | ||
387 | @@ -XXX,XX +XXX,XX @@ static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
388 | * Compares the actual reference count for each cluster in the image against the | ||
389 | * refcount as reported by the refcount structures on-disk. | ||
390 | */ | ||
391 | -static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
392 | - BdrvCheckMode fix, bool *rebuild, | ||
393 | - int64_t *highest_cluster, | ||
394 | - void *refcount_table, int64_t nb_clusters) | ||
395 | +static void coroutine_fn | ||
396 | +compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
397 | + BdrvCheckMode fix, bool *rebuild, | ||
398 | + int64_t *highest_cluster, | ||
399 | + void *refcount_table, int64_t nb_clusters) | ||
400 | { | ||
401 | BDRVQcow2State *s = bs->opaque; | ||
402 | int64_t i; | ||
403 | @@ -XXX,XX +XXX,XX @@ static int64_t alloc_clusters_imrt(BlockDriverState *bs, | ||
404 | * Return whether the on-disk reftable array was resized (true/false), | ||
405 | * or -errno on error. | ||
406 | */ | ||
407 | -static int rebuild_refcounts_write_refblocks( | ||
408 | +static int coroutine_fn GRAPH_RDLOCK | ||
409 | +rebuild_refcounts_write_refblocks( | ||
410 | BlockDriverState *bs, void **refcount_table, int64_t *nb_clusters, | ||
411 | int64_t first_cluster, int64_t end_cluster, | ||
412 | uint64_t **on_disk_reftable_ptr, uint32_t *on_disk_reftable_entries_ptr, | ||
413 | @@ -XXX,XX +XXX,XX @@ static int rebuild_refcounts_write_refblocks( | ||
414 | on_disk_refblock = (void *)((char *) *refcount_table + | ||
415 | refblock_index * s->cluster_size); | ||
416 | |||
417 | - ret = bdrv_pwrite(bs->file, refblock_offset, s->cluster_size, | ||
418 | - on_disk_refblock, 0); | ||
419 | + ret = bdrv_co_pwrite(bs->file, refblock_offset, s->cluster_size, | ||
420 | + on_disk_refblock, 0); | ||
421 | if (ret < 0) { | ||
422 | error_setg_errno(errp, -ret, "ERROR writing refblock"); | ||
423 | return ret; | ||
424 | @@ -XXX,XX +XXX,XX @@ static int rebuild_refcounts_write_refblocks( | ||
425 | * On success, the old refcount structure is leaked (it will be covered by the | ||
426 | * new refcount structure). | ||
427 | */ | ||
428 | -static int rebuild_refcount_structure(BlockDriverState *bs, | ||
429 | - BdrvCheckResult *res, | ||
430 | - void **refcount_table, | ||
431 | - int64_t *nb_clusters, | ||
432 | - Error **errp) | ||
433 | +static int coroutine_fn GRAPH_RDLOCK | ||
434 | +rebuild_refcount_structure(BlockDriverState *bs, BdrvCheckResult *res, | ||
435 | + void **refcount_table, int64_t *nb_clusters, | ||
436 | + Error **errp) | ||
437 | { | ||
438 | BDRVQcow2State *s = bs->opaque; | ||
439 | int64_t reftable_offset = -1; | ||
440 | @@ -XXX,XX +XXX,XX @@ static int rebuild_refcount_structure(BlockDriverState *bs, | ||
45 | } | 441 | } |
46 | 442 | ||
47 | - blk->ctx = new_context; | 443 | assert(reftable_length < INT_MAX); |
48 | return 0; | 444 | - ret = bdrv_pwrite(bs->file, reftable_offset, reftable_length, |
445 | - on_disk_reftable, 0); | ||
446 | + ret = bdrv_co_pwrite(bs->file, reftable_offset, reftable_length, | ||
447 | + on_disk_reftable, 0); | ||
448 | if (ret < 0) { | ||
449 | error_setg_errno(errp, -ret, "ERROR writing reftable"); | ||
450 | goto fail; | ||
451 | @@ -XXX,XX +XXX,XX @@ static int rebuild_refcount_structure(BlockDriverState *bs, | ||
452 | reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset); | ||
453 | reftable_offset_and_clusters.reftable_clusters = | ||
454 | cpu_to_be32(reftable_clusters); | ||
455 | - ret = bdrv_pwrite_sync(bs->file, | ||
456 | - offsetof(QCowHeader, refcount_table_offset), | ||
457 | - sizeof(reftable_offset_and_clusters), | ||
458 | - &reftable_offset_and_clusters, 0); | ||
459 | + ret = bdrv_co_pwrite_sync(bs->file, | ||
460 | + offsetof(QCowHeader, refcount_table_offset), | ||
461 | + sizeof(reftable_offset_and_clusters), | ||
462 | + &reftable_offset_and_clusters, 0); | ||
463 | if (ret < 0) { | ||
464 | error_setg_errno(errp, -ret, "ERROR setting reftable"); | ||
465 | goto fail; | ||
466 | @@ -XXX,XX +XXX,XX @@ fail: | ||
467 | * Returns 0 if no errors are found, the number of errors in case the image is | ||
468 | * detected as corrupted, and -errno when an internal error occurred. | ||
469 | */ | ||
470 | -int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
471 | - BdrvCheckMode fix) | ||
472 | +int coroutine_fn GRAPH_RDLOCK | ||
473 | +qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix) | ||
474 | { | ||
475 | BDRVQcow2State *s = bs->opaque; | ||
476 | BdrvCheckResult pre_compare_res; | ||
477 | @@ -XXX,XX +XXX,XX @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, | ||
478 | bool rebuild = false; | ||
479 | int ret; | ||
480 | |||
481 | - size = bdrv_getlength(bs->file->bs); | ||
482 | + size = bdrv_co_getlength(bs->file->bs); | ||
483 | if (size < 0) { | ||
484 | res->check_errors++; | ||
485 | return size; | ||
486 | @@ -XXX,XX +XXX,XX @@ done: | ||
487 | return ret; | ||
49 | } | 488 | } |
50 | 489 | ||
490 | -static int64_t get_refblock_offset(BlockDriverState *bs, uint64_t offset) | ||
491 | +static int64_t coroutine_fn get_refblock_offset(BlockDriverState *bs, | ||
492 | + uint64_t offset) | ||
493 | { | ||
494 | BDRVQcow2State *s = bs->opaque; | ||
495 | uint32_t index = offset_to_reftable_index(s, offset); | ||
496 | @@ -XXX,XX +XXX,XX @@ int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size) | ||
497 | return -EIO; | ||
498 | } | ||
499 | |||
500 | -int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs) | ||
501 | +int coroutine_fn GRAPH_RDLOCK | ||
502 | +qcow2_detect_metadata_preallocation(BlockDriverState *bs) | ||
503 | { | ||
504 | BDRVQcow2State *s = bs->opaque; | ||
505 | int64_t i, end_cluster, cluster_count = 0, threshold; | ||
506 | diff --git a/block/qcow2.c b/block/qcow2.c | ||
507 | index XXXXXXX..XXXXXXX 100644 | ||
508 | --- a/block/qcow2.c | ||
509 | +++ b/block/qcow2.c | ||
510 | @@ -XXX,XX +XXX,XX @@ int qcow2_mark_corrupt(BlockDriverState *bs) | ||
511 | * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes | ||
512 | * before if necessary. | ||
513 | */ | ||
514 | -int qcow2_mark_consistent(BlockDriverState *bs) | ||
515 | +static int coroutine_fn qcow2_mark_consistent(BlockDriverState *bs) | ||
516 | { | ||
517 | BDRVQcow2State *s = bs->opaque; | ||
518 | |||
51 | -- | 519 | -- |
52 | 2.38.1 | 520 | 2.41.0 |
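In the block/block-backend.c change above, the fix is purely one of ordering: blk->ctx has to describe the new root context before anything that can call back into blk_get_aio_context() runs. Reduced to its core, and using the names from that hunk, the reordered sequence is (a sketch, not the full function):

    /* Publish the new context first: bdrv_drained_begin() may query
     * blk_get_aio_context(), which asserts that blk->ctx matches the
     * root node's context. */
    blk->ctx = new_context;
    if (tgm->throttle_state) {
        bdrv_drained_begin(bs);
        throttle_group_detach_aio_context(tgm);
        /* ... reattach throttling to new_context and end the drain ... */
    }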
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Waiting for all active writes to settle before daring to create a | 3 | bdrv_co_getlength was recently introduced, with bdrv_getlength becoming |
4 | background copying operation means that we will never do background | 4 | a wrapper for use in unknown context. Switch to bdrv_co_getlength when |
5 | operations while the guest does anything (in write-blocking mode), and | 5 | possible. |
6 | therefore cannot converge. Yes, we also will not diverge, but actually | ||
7 | converging would be even nicer. | ||
8 | 6 | ||
9 | It is unclear why we did decide to wait for all active writes to settle | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
10 | before creating a background operation, but it just does not seem | 8 | Message-ID: <20230601115145.196465-12-pbonzini@redhat.com> |
11 | necessary. Active writes will put themselves into the in_flight bitmap | ||
12 | and thus properly block actually conflicting background requests. | ||
13 | |||
14 | It is important for active requests to wait on overlapping background | ||
15 | requests, which we do in active_write_prepare(). However, so far it was | ||
16 | not documented why it is important. Add such documentation now, and | ||
17 | also to the other call of mirror_wait_on_conflicts(), so that it becomes | ||
18 | more clear why and when requests need to actively wait for other | ||
19 | requests to settle. | ||
20 | |||
21 | Another thing to note is that of course we need to ensure that there are | ||
22 | no active requests when the job completes, but that is done by virtue of | ||
23 | the BDS being drained anyway, so there cannot be any active requests at | ||
24 | that point. | ||
25 | |||
26 | With this change, we will need to explicitly keep track of how many | ||
27 | bytes are in flight in active requests so that | ||
28 | job_progress_set_remaining() in mirror_run() can set the correct number | ||
29 | of remaining bytes. | ||
30 | |||
31 | Buglink: https://bugzilla.redhat.com/show_bug.cgi?id=2123297 | ||
32 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | ||
33 | Message-Id: <20221109165452.67927-2-hreitz@redhat.com> | ||
34 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 9 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
35 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 10 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
36 | --- | 11 | --- |
37 | block/mirror.c | 37 ++++++++++++++++++++++++++++++------- | 12 | block/io.c | 10 +++++----- |
38 | 1 file changed, 30 insertions(+), 7 deletions(-) | 13 | block/parallels.c | 4 ++-- |
14 | block/qcow.c | 6 +++--- | ||
15 | block/vmdk.c | 4 ++-- | ||
16 | 4 files changed, 12 insertions(+), 12 deletions(-) | ||
39 | 17 | ||
40 | diff --git a/block/mirror.c b/block/mirror.c | 18 | diff --git a/block/io.c b/block/io.c |
41 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
42 | --- a/block/mirror.c | 20 | --- a/block/io.c |
43 | +++ b/block/mirror.c | 21 | +++ b/block/io.c |
44 | @@ -XXX,XX +XXX,XX @@ typedef struct MirrorBlockJob { | 22 | @@ -XXX,XX +XXX,XX @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req, |
45 | int max_iov; | ||
46 | bool initial_zeroing_ongoing; | ||
47 | int in_active_write_counter; | ||
48 | + int64_t active_write_bytes_in_flight; | ||
49 | bool prepared; | ||
50 | bool in_drain; | ||
51 | } MirrorBlockJob; | ||
52 | @@ -XXX,XX +XXX,XX @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||
53 | } | 23 | } |
54 | bdrv_dirty_bitmap_unlock(s->dirty_bitmap); | 24 | |
55 | 25 | /* Forward the request to the BlockDriver, possibly fragmenting it */ | |
56 | + /* | 26 | - total_bytes = bdrv_getlength(bs); |
57 | + * Wait for concurrent requests to @offset. The next loop will limit the | 27 | + total_bytes = bdrv_co_getlength(bs); |
58 | + * copied area based on in_flight_bitmap so we only copy an area that does | 28 | if (total_bytes < 0) { |
59 | + * not overlap with concurrent in-flight requests. Still, we would like to | 29 | ret = total_bytes; |
60 | + * copy something, so wait until there are at least no more requests to the | 30 | goto out; |
61 | + * very beginning of the area. | 31 | @@ -XXX,XX +XXX,XX @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero, |
62 | + */ | 32 | assert(pnum); |
63 | mirror_wait_on_conflicts(NULL, s, offset, 1); | 33 | assert_bdrv_graph_readable(); |
64 | 34 | *pnum = 0; | |
65 | job_pause_point(&s->common.job); | 35 | - total_size = bdrv_getlength(bs); |
66 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) | 36 | + total_size = bdrv_co_getlength(bs); |
67 | int64_t cnt, delta; | 37 | if (total_size < 0) { |
68 | bool should_complete; | 38 | ret = total_size; |
69 | 39 | goto early_out; | |
70 | - /* Do not start passive operations while there are active | 40 | @@ -XXX,XX +XXX,XX @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero, |
71 | - * writes in progress */ | 41 | bytes = n; |
72 | - while (s->in_active_write_counter) { | ||
73 | - mirror_wait_for_any_operation(s, true); | ||
74 | - } | ||
75 | - | ||
76 | if (s->ret < 0) { | ||
77 | ret = s->ret; | ||
78 | goto immediate_exit; | ||
79 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) | ||
80 | /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is | ||
81 | * the number of bytes currently being processed; together those are | ||
82 | * the current remaining operation length */ | ||
83 | - job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt); | ||
84 | + job_progress_set_remaining(&s->common.job, | ||
85 | + s->bytes_in_flight + cnt + | ||
86 | + s->active_write_bytes_in_flight); | ||
87 | |||
88 | /* Note that even when no rate limit is applied we need to yield | ||
89 | * periodically with no pending I/O so that bdrv_drain_all() returns. | ||
90 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp) | ||
91 | |||
92 | s->in_drain = true; | ||
93 | bdrv_drained_begin(bs); | ||
94 | + | ||
95 | + /* Must be zero because we are drained */ | ||
96 | + assert(s->in_active_write_counter == 0); | ||
97 | + | ||
98 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); | ||
99 | if (cnt > 0 || mirror_flush(s) < 0) { | ||
100 | bdrv_drained_end(bs); | ||
101 | @@ -XXX,XX +XXX,XX @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, | ||
102 | } | 42 | } |
103 | 43 | ||
104 | job_progress_increase_remaining(&job->common.job, bytes); | 44 | - /* Must be non-NULL or bdrv_getlength() would have failed */ |
105 | + job->active_write_bytes_in_flight += bytes; | 45 | + /* Must be non-NULL or bdrv_co_getlength() would have failed */ |
106 | 46 | assert(bs->drv); | |
107 | switch (method) { | 47 | has_filtered_child = bdrv_filter_child(bs); |
108 | case MIRROR_METHOD_COPY: | 48 | if (!bs->drv->bdrv_co_block_status && !has_filtered_child) { |
109 | @@ -XXX,XX +XXX,XX @@ do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, | 49 | @@ -XXX,XX +XXX,XX @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero, |
110 | abort(); | 50 | if (!cow_bs) { |
51 | ret |= BDRV_BLOCK_ZERO; | ||
52 | } else if (want_zero) { | ||
53 | - int64_t size2 = bdrv_getlength(cow_bs); | ||
54 | + int64_t size2 = bdrv_co_getlength(cow_bs); | ||
55 | |||
56 | if (size2 >= 0 && offset >= size2) { | ||
57 | ret |= BDRV_BLOCK_ZERO; | ||
58 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact, | ||
59 | return ret; | ||
111 | } | 60 | } |
112 | 61 | ||
113 | + job->active_write_bytes_in_flight -= bytes; | 62 | - old_size = bdrv_getlength(bs); |
114 | if (ret >= 0) { | 63 | + old_size = bdrv_co_getlength(bs); |
115 | job_progress_update(&job->common.job, bytes); | 64 | if (old_size < 0) { |
116 | } else { | 65 | error_setg_errno(errp, -old_size, "Failed to get old image size"); |
117 | @@ -XXX,XX +XXX,XX @@ static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, | 66 | return old_size; |
118 | 67 | diff --git a/block/parallels.c b/block/parallels.c | |
119 | s->in_active_write_counter++; | 68 | index XXXXXXX..XXXXXXX 100644 |
120 | 69 | --- a/block/parallels.c | |
121 | + /* | 70 | +++ b/block/parallels.c |
122 | + * Wait for concurrent requests affecting the area. If there are already | 71 | @@ -XXX,XX +XXX,XX @@ allocate_clusters(BlockDriverState *bs, int64_t sector_num, |
123 | + * running requests that are copying off now-to-be stale data in the area, | 72 | assert(idx < s->bat_size && idx + to_allocate <= s->bat_size); |
124 | + * we must wait for them to finish before we begin writing fresh data to the | 73 | |
125 | + * target so that the write operations appear in the correct order. | 74 | space = to_allocate * s->tracks; |
126 | + * Note that background requests (see mirror_iteration()) in contrast only | 75 | - len = bdrv_getlength(bs->file->bs); |
127 | + * wait for conflicting requests at the start of the dirty area, and then | 76 | + len = bdrv_co_getlength(bs->file->bs); |
128 | + * (based on the in_flight_bitmap) truncate the area to copy so it will not | 77 | if (len < 0) { |
129 | + * conflict with any requests beyond that. For active writes, however, we | 78 | return len; |
130 | + * cannot truncate that area. The request from our parent must be blocked | 79 | } |
131 | + * until the area is copied in full. Therefore, we must wait for the whole | 80 | @@ -XXX,XX +XXX,XX @@ parallels_check_outside_image(BlockDriverState *bs, BdrvCheckResult *res, |
132 | + * area to become free of concurrent requests. | 81 | uint32_t i; |
133 | + */ | 82 | int64_t off, high_off, size; |
134 | mirror_wait_on_conflicts(op, s, offset, bytes); | 83 | |
135 | 84 | - size = bdrv_getlength(bs->file->bs); | |
136 | bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); | 85 | + size = bdrv_co_getlength(bs->file->bs); |
86 | if (size < 0) { | ||
87 | res->check_errors++; | ||
88 | return size; | ||
89 | diff --git a/block/qcow.c b/block/qcow.c | ||
90 | index XXXXXXX..XXXXXXX 100644 | ||
91 | --- a/block/qcow.c | ||
92 | +++ b/block/qcow.c | ||
93 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
94 | if (!allocate) | ||
95 | return 0; | ||
96 | /* allocate a new l2 entry */ | ||
97 | - l2_offset = bdrv_getlength(bs->file->bs); | ||
98 | + l2_offset = bdrv_co_getlength(bs->file->bs); | ||
99 | if (l2_offset < 0) { | ||
100 | return l2_offset; | ||
101 | } | ||
102 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
103 | if (decompress_cluster(bs, cluster_offset) < 0) { | ||
104 | return -EIO; | ||
105 | } | ||
106 | - cluster_offset = bdrv_getlength(bs->file->bs); | ||
107 | + cluster_offset = bdrv_co_getlength(bs->file->bs); | ||
108 | if ((int64_t) cluster_offset < 0) { | ||
109 | return cluster_offset; | ||
110 | } | ||
111 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
112 | return ret; | ||
113 | } | ||
114 | } else { | ||
115 | - cluster_offset = bdrv_getlength(bs->file->bs); | ||
116 | + cluster_offset = bdrv_co_getlength(bs->file->bs); | ||
117 | if ((int64_t) cluster_offset < 0) { | ||
118 | return cluster_offset; | ||
119 | } | ||
120 | diff --git a/block/vmdk.c b/block/vmdk.c | ||
121 | index XXXXXXX..XXXXXXX 100644 | ||
122 | --- a/block/vmdk.c | ||
123 | +++ b/block/vmdk.c | ||
124 | @@ -XXX,XX +XXX,XX @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
125 | int64_t length; | ||
126 | |||
127 | for (i = 0; i < s->num_extents; i++) { | ||
128 | - length = bdrv_getlength(s->extents[i].file->bs); | ||
129 | + length = bdrv_co_getlength(s->extents[i].file->bs); | ||
130 | if (length < 0) { | ||
131 | return length; | ||
132 | } | ||
133 | @@ -XXX,XX +XXX,XX @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix) | ||
134 | break; | ||
135 | } | ||
136 | if (ret == VMDK_OK) { | ||
137 | - int64_t extent_len = bdrv_getlength(extent->file->bs); | ||
138 | + int64_t extent_len = bdrv_co_getlength(extent->file->bs); | ||
139 | if (extent_len < 0) { | ||
140 | fprintf(stderr, | ||
141 | "ERROR: could not get extent file length for sector %" | ||
137 | -- | 142 | -- |
138 | 2.38.1 | 143 | 2.41.0 |
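One consequence spelled out in the mirror commit message above: once background copying no longer waits for active writes to settle, the job has to account for those writes itself so the reported remaining work stays correct. The remaining-work update then becomes (sketch of the call as changed in the hunk above):

    /* cnt is the number of dirty bytes still to copy; bytes currently in
     * flight for background copies and for active writes are added on top. */
    job_progress_set_remaining(&s->common.job,
                               s->bytes_in_flight + cnt +
                               s->active_write_bytes_in_flight);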
1 | From: Hanna Reitz <hreitz@redhat.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | bdrv_parent_drained_{begin,end}_single() are supposed to operate on the | 3 | bdrv_co_debug_event was recently introduced, with bdrv_debug_event |
4 | parent, not on the child, so they should not attempt to get the context | 4 | becoming a wrapper for use in unknown context. Because most of the |
5 | to poll from the child but the parent instead. BDRV_POLL_WHILE(c->bs) | 5 | time bdrv_debug_event is used on a BdrvChild via the wrapper macro |
6 | does get the context from the child, so we should replace it with | 6 | BLKDBG_EVENT, introduce a similar macro BLKDBG_CO_EVENT that calls |
7 | AIO_WAIT_WHILE() on the parent's context instead. | 7 | bdrv_co_debug_event, and switch whenever possible. |
8 | 8 | ||
9 | This problem becomes apparent when bdrv_replace_child_noperm() invokes | 9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
10 | bdrv_parent_drained_end_single() after removing a child from a subgraph | 10 | Message-ID: <20230601115145.196465-13-pbonzini@redhat.com> |
11 | that is in an I/O thread. By the time bdrv_parent_drained_end_single() | ||
12 | is called, child->bs is NULL, and so BDRV_POLL_WHILE(c->bs, ...) will | ||
13 | poll the main loop instead of the I/O thread; but anything that | ||
14 | bdrv_parent_drained_end_single_no_poll() may have scheduled is going to | ||
15 | want to run in the I/O thread, but because we poll the main loop, the | ||
16 | I/O thread is never unpaused, and nothing is run, resulting in a | ||
17 | deadlock. | ||
18 | |||
19 | Closes: https://gitlab.com/qemu-project/qemu/-/issues/1215 | ||
20 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> | 11 | Reviewed-by: Kevin Wolf <kwolf@redhat.com> |
21 | Signed-off-by: Hanna Reitz <hreitz@redhat.com> | ||
22 | Message-Id: <20221107151321.211175-4-hreitz@redhat.com> | ||
23 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> | 12 | Signed-off-by: Kevin Wolf <kwolf@redhat.com> |
24 | --- | 13 | --- |
25 | block/io.c | 6 ++++-- | 14 | include/block/block-io.h | 7 +++++++ |
26 | 1 file changed, 4 insertions(+), 2 deletions(-) | 15 | block/io.c | 4 ++-- |
16 | block/qcow.c | 24 ++++++++++++------------ | ||
17 | block/qcow2-cluster.c | 12 ++++++------ | ||
18 | block/qcow2-refcount.c | 4 ++-- | ||
19 | block/qcow2.c | 18 +++++++++--------- | ||
20 | block/qed-table.c | 6 +++--- | ||
21 | block/qed.c | 8 ++++---- | ||
22 | block/raw-format.c | 4 ++-- | ||
23 | block/vmdk.c | 24 ++++++++++++------------ | ||
24 | 10 files changed, 59 insertions(+), 52 deletions(-) | ||
27 | 25 | ||
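BLKDBG_CO_EVENT, added below in include/block/block-io.h, is the coroutine-context counterpart of BLKDBG_EVENT: same arguments, but it calls bdrv_co_debug_event() instead of the wrapped bdrv_debug_event(). A hypothetical caller (not from this patch) would use it right next to the corresponding _co_ I/O call, for example:

    static int coroutine_fn GRAPH_RDLOCK
    example_read_cluster(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, void *buf)
    {
        /* Emits the blkdebug event via bdrv_co_debug_event(), as the
         * macro below is defined to do. */
        BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO);
        return bdrv_co_pread(bs->file, offset, bytes, buf, 0);
    }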
26 | diff --git a/include/block/block-io.h b/include/block/block-io.h | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/include/block/block-io.h | ||
29 | +++ b/include/block/block-io.h | ||
30 | @@ -XXX,XX +XXX,XX @@ bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event); | ||
31 | void co_wrapper_mixed_bdrv_rdlock | ||
32 | bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event); | ||
33 | |||
34 | +#define BLKDBG_CO_EVENT(child, evt) \ | ||
35 | + do { \ | ||
36 | + if (child) { \ | ||
37 | + bdrv_co_debug_event(child->bs, evt); \ | ||
38 | + } \ | ||
39 | + } while (0) | ||
40 | + | ||
41 | #define BLKDBG_EVENT(child, evt) \ | ||
42 | do { \ | ||
43 | if (child) { \ | ||
28 | diff --git a/block/io.c b/block/io.c | 44 | diff --git a/block/io.c b/block/io.c |
29 | index XXXXXXX..XXXXXXX 100644 | 45 | index XXXXXXX..XXXXXXX 100644 |
30 | --- a/block/io.c | 46 | --- a/block/io.c |
31 | +++ b/block/io.c | 47 | +++ b/block/io.c |
32 | @@ -XXX,XX +XXX,XX @@ static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c, | 48 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs) |
33 | void bdrv_parent_drained_end_single(BdrvChild *c) | 49 | } |
50 | |||
51 | /* Write back cached data to the OS even with cache=unsafe */ | ||
52 | - BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); | ||
53 | + BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS); | ||
54 | if (bs->drv->bdrv_co_flush_to_os) { | ||
55 | ret = bs->drv->bdrv_co_flush_to_os(bs); | ||
56 | if (ret < 0) { | ||
57 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs) | ||
58 | goto flush_children; | ||
59 | } | ||
60 | |||
61 | - BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); | ||
62 | + BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK); | ||
63 | if (!bs->drv) { | ||
64 | /* bs->drv->bdrv_co_flush() might have ejected the BDS | ||
65 | * (even in case of apparent success) */ | ||
66 | diff --git a/block/qcow.c b/block/qcow.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/block/qcow.c | ||
69 | +++ b/block/qcow.c | ||
70 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
71 | /* update the L1 entry */ | ||
72 | s->l1_table[l1_index] = l2_offset; | ||
73 | tmp = cpu_to_be64(l2_offset); | ||
74 | - BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); | ||
75 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_UPDATE); | ||
76 | ret = bdrv_co_pwrite_sync(bs->file, | ||
77 | s->l1_table_offset + l1_index * sizeof(tmp), | ||
78 | sizeof(tmp), &tmp, 0); | ||
79 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
80 | } | ||
81 | } | ||
82 | l2_table = s->l2_cache + (min_index << s->l2_bits); | ||
83 | - BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD); | ||
84 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_LOAD); | ||
85 | if (new_l2_table) { | ||
86 | memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); | ||
87 | ret = bdrv_co_pwrite_sync(bs->file, l2_offset, | ||
88 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
89 | ((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) { | ||
90 | if (!allocate) | ||
91 | return 0; | ||
92 | - BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC); | ||
93 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC); | ||
94 | assert(QEMU_IS_ALIGNED(n_start | n_end, BDRV_SECTOR_SIZE)); | ||
95 | /* allocate a new cluster */ | ||
96 | if ((cluster_offset & QCOW_OFLAG_COMPRESSED) && | ||
97 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
98 | } | ||
99 | cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size); | ||
100 | /* write the cluster content */ | ||
101 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
102 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
103 | ret = bdrv_co_pwrite(bs->file, cluster_offset, s->cluster_size, | ||
104 | s->cluster_cache, 0); | ||
105 | if (ret < 0) { | ||
106 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
107 | NULL) < 0) { | ||
108 | return -EIO; | ||
109 | } | ||
110 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
111 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
112 | ret = bdrv_co_pwrite(bs->file, cluster_offset + i, | ||
113 | BDRV_SECTOR_SIZE, | ||
114 | s->cluster_data, 0); | ||
115 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, | ||
116 | tmp = cpu_to_be64(cluster_offset); | ||
117 | l2_table[l2_index] = tmp; | ||
118 | if (allocate == 2) { | ||
119 | - BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); | ||
120 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); | ||
121 | } else { | ||
122 | - BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE); | ||
123 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE); | ||
124 | } | ||
125 | ret = bdrv_co_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp), | ||
126 | sizeof(tmp), &tmp, 0); | ||
127 | @@ -XXX,XX +XXX,XX @@ decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) | ||
128 | if (s->cluster_cache_offset != coffset) { | ||
129 | csize = cluster_offset >> (63 - s->cluster_bits); | ||
130 | csize &= (s->cluster_size - 1); | ||
131 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); | ||
132 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED); | ||
133 | ret = bdrv_co_pread(bs->file, coffset, csize, s->cluster_data, 0); | ||
134 | if (ret < 0) | ||
135 | return -1; | ||
136 | @@ -XXX,XX +XXX,XX @@ qcow_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
137 | /* read from the base image */ | ||
138 | qemu_co_mutex_unlock(&s->lock); | ||
139 | /* qcow2 emits this on bs->file instead of bs->backing */ | ||
140 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
141 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
142 | ret = bdrv_co_pread(bs->backing, offset, n, buf, 0); | ||
143 | qemu_co_mutex_lock(&s->lock); | ||
144 | if (ret < 0) { | ||
145 | @@ -XXX,XX +XXX,XX @@ qcow_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
146 | break; | ||
147 | } | ||
148 | qemu_co_mutex_unlock(&s->lock); | ||
149 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | ||
150 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); | ||
151 | ret = bdrv_co_pread(bs->file, cluster_offset + offset_in_cluster, | ||
152 | n, buf, 0); | ||
153 | qemu_co_mutex_lock(&s->lock); | ||
154 | @@ -XXX,XX +XXX,XX @@ qcow_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
155 | } | ||
156 | |||
157 | qemu_co_mutex_unlock(&s->lock); | ||
158 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
159 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
160 | ret = bdrv_co_pwrite(bs->file, cluster_offset + offset_in_cluster, | ||
161 | n, buf, 0); | ||
162 | qemu_co_mutex_lock(&s->lock); | ||
163 | @@ -XXX,XX +XXX,XX @@ qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
164 | } | ||
165 | cluster_offset &= s->cluster_offset_mask; | ||
166 | |||
167 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); | ||
168 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); | ||
169 | ret = bdrv_co_pwrite(bs->file, cluster_offset, out_len, out_buf, 0); | ||
170 | if (ret < 0) { | ||
171 | goto fail; | ||
172 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c | ||
173 | index XXXXXXX..XXXXXXX 100644 | ||
174 | --- a/block/qcow2-cluster.c | ||
175 | +++ b/block/qcow2-cluster.c | ||
176 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs, | ||
177 | fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size); | ||
178 | #endif | ||
179 | |||
180 | - BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE); | ||
181 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE); | ||
182 | ret = bdrv_co_pwrite_zeroes(bs->file, | ||
183 | s->l1_table_offset + new_l1_size * L1E_SIZE, | ||
184 | (s->l1_size - new_l1_size) * L1E_SIZE, 0); | ||
185 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qcow2_shrink_l1_table(BlockDriverState *bs, | ||
186 | goto fail; | ||
187 | } | ||
188 | |||
189 | - BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS); | ||
190 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS); | ||
191 | for (i = s->l1_size - 1; i > new_l1_size - 1; i--) { | ||
192 | if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) { | ||
193 | continue; | ||
194 | @@ -XXX,XX +XXX,XX @@ do_perform_cow_read(BlockDriverState *bs, uint64_t src_cluster_offset, | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | - BLKDBG_EVENT(bs->file, BLKDBG_COW_READ); | ||
199 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_COW_READ); | ||
200 | |||
201 | if (!bs->drv) { | ||
202 | return -ENOMEDIUM; | ||
203 | @@ -XXX,XX +XXX,XX @@ do_perform_cow_write(BlockDriverState *bs, uint64_t cluster_offset, | ||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | - BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); | ||
208 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_COW_WRITE); | ||
209 | ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster, | ||
210 | qiov->size, qiov, 0); | ||
211 | if (ret < 0) { | ||
212 | @@ -XXX,XX +XXX,XX @@ qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset, | ||
213 | |||
214 | /* compressed clusters never have the copied flag */ | ||
215 | |||
216 | - BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); | ||
217 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); | ||
218 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); | ||
219 | set_l2_entry(s, l2_slice, l2_index, cluster_offset); | ||
220 | if (has_subclusters(s)) { | ||
221 | @@ -XXX,XX +XXX,XX @@ perform_cow(BlockDriverState *bs, QCowL2Meta *m) | ||
222 | /* NOTE: we have a write_aio blkdebug event here followed by | ||
223 | * a cow_write one in do_perform_cow_write(), but there's only | ||
224 | * one single I/O operation */ | ||
225 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
226 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
227 | ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); | ||
228 | } else { | ||
229 | /* If there's no guest data then write both COW regions separately */ | ||
230 | diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c | ||
231 | index XXXXXXX..XXXXXXX 100644 | ||
232 | --- a/block/qcow2-refcount.c | ||
233 | +++ b/block/qcow2-refcount.c | ||
234 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qcow2_refcount_init(BlockDriverState *bs) | ||
235 | ret = -ENOMEM; | ||
236 | goto fail; | ||
237 | } | ||
238 | - BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD); | ||
239 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_REFTABLE_LOAD); | ||
240 | ret = bdrv_co_pread(bs->file, s->refcount_table_offset, | ||
241 | refcount_table_size2, s->refcount_table, 0); | ||
242 | if (ret < 0) { | ||
243 | @@ -XXX,XX +XXX,XX @@ int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int si | ||
244 | size_t free_in_cluster; | ||
245 | int ret; | ||
246 | |||
247 | - BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES); | ||
248 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES); | ||
249 | assert(size > 0 && size <= s->cluster_size); | ||
250 | assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset)); | ||
251 | |||
252 | diff --git a/block/qcow2.c b/block/qcow2.c | ||
253 | index XXXXXXX..XXXXXXX 100644 | ||
254 | --- a/block/qcow2.c | ||
255 | +++ b/block/qcow2.c | ||
256 | @@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_encrypted(BlockDriverState *bs, | ||
257 | return -ENOMEM; | ||
258 | } | ||
259 | |||
260 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | ||
261 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); | ||
262 | ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0); | ||
263 | if (ret < 0) { | ||
264 | goto fail; | ||
265 | @@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type, | ||
266 | case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: | ||
267 | assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ | ||
268 | |||
269 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
270 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
271 | return bdrv_co_preadv_part(bs->backing, offset, bytes, | ||
272 | qiov, qiov_offset, 0); | ||
273 | |||
274 | @@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type, | ||
275 | offset, bytes, qiov, qiov_offset); | ||
276 | } | ||
277 | |||
278 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | ||
279 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); | ||
280 | return bdrv_co_preadv_part(s->data_file, host_offset, | ||
281 | bytes, qiov, qiov_offset, 0); | ||
282 | |||
283 | @@ -XXX,XX +XXX,XX @@ handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) | ||
284 | return ret; | ||
285 | } | ||
286 | |||
287 | - BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); | ||
288 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); | ||
289 | ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes, | ||
290 | BDRV_REQ_NO_FALLBACK); | ||
291 | if (ret < 0) { | ||
292 | @@ -XXX,XX +XXX,XX @@ int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset, | ||
293 | * guest data now. | ||
294 | */ | ||
295 | if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { | ||
296 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
297 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
298 | trace_qcow2_writev_data(qemu_coroutine_self(), host_offset); | ||
299 | ret = bdrv_co_pwritev_part(s->data_file, host_offset, | ||
300 | bytes, qiov, qiov_offset, 0); | ||
301 | @@ -XXX,XX +XXX,XX @@ qcow2_co_pwritev_compressed_task(BlockDriverState *bs, | ||
302 | goto fail; | ||
303 | } | ||
304 | |||
305 | - BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); | ||
306 | + BLKDBG_CO_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); | ||
307 | ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); | ||
308 | if (ret < 0) { | ||
309 | goto fail; | ||
310 | @@ -XXX,XX +XXX,XX @@ qcow2_co_preadv_compressed(BlockDriverState *bs, | ||
311 | |||
312 | out_buf = qemu_blockalign(bs, s->cluster_size); | ||
313 | |||
314 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); | ||
315 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_COMPRESSED); | ||
316 | ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); | ||
317 | if (ret < 0) { | ||
318 | goto fail; | ||
319 | @@ -XXX,XX +XXX,XX @@ qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) | ||
320 | return offset; | ||
321 | } | ||
322 | |||
323 | - BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); | ||
324 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); | ||
325 | return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0); | ||
326 | } | ||
327 | |||
328 | @@ -XXX,XX +XXX,XX @@ qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) | ||
329 | return offset; | ||
330 | } | ||
331 | |||
332 | - BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); | ||
333 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); | ||
334 | return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0); | ||
335 | } | ||
336 | |||
337 | diff --git a/block/qed-table.c b/block/qed-table.c | ||
338 | index XXXXXXX..XXXXXXX 100644 | ||
339 | --- a/block/qed-table.c | ||
340 | +++ b/block/qed-table.c | ||
341 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qed_read_l1_table_sync(BDRVQEDState *s) | ||
342 | int coroutine_fn qed_write_l1_table(BDRVQEDState *s, unsigned int index, | ||
343 | unsigned int n) | ||
34 | { | 344 | { |
35 | int drained_end_counter = 0; | 345 | - BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE); |
36 | + AioContext *ctx = bdrv_child_get_parent_aio_context(c); | 346 | + BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L1_UPDATE); |
37 | IO_OR_GS_CODE(); | 347 | return qed_write_table(s, s->header.l1_table_offset, |
38 | bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); | 348 | s->l1_table, index, n, false); |
39 | - BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0); | ||
40 | + AIO_WAIT_WHILE(ctx, qatomic_read(&drained_end_counter) > 0); | ||
41 | } | 349 | } |
42 | 350 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, | |
43 | static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, | 351 | request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache); |
44 | @@ -XXX,XX +XXX,XX @@ static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore, | 352 | request->l2_table->table = qed_alloc_table(s); |
45 | 353 | ||
46 | void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll) | 354 | - BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD); |
355 | + BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L2_LOAD); | ||
356 | ret = qed_read_table(s, offset, request->l2_table->table); | ||
357 | |||
358 | if (ret) { | ||
359 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn qed_write_l2_table(BDRVQEDState *s, QEDRequest *request, | ||
360 | unsigned int index, unsigned int n, | ||
361 | bool flush) | ||
47 | { | 362 | { |
48 | + AioContext *ctx = bdrv_child_get_parent_aio_context(c); | 363 | - BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE); |
49 | IO_OR_GS_CODE(); | 364 | + BLKDBG_CO_EVENT(s->bs->file, BLKDBG_L2_UPDATE); |
50 | c->parent_quiesce_counter++; | 365 | return qed_write_table(s, request->l2_table->offset, |
51 | if (c->klass->drained_begin) { | 366 | request->l2_table->table, index, n, flush); |
52 | c->klass->drained_begin(c); | ||
53 | } | ||
54 | if (poll) { | ||
55 | - BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c)); | ||
56 | + AIO_WAIT_WHILE(ctx, bdrv_parent_drained_poll_single(c)); | ||
57 | } | ||
58 | } | 367 | } |
368 | diff --git a/block/qed.c b/block/qed.c | ||
369 | index XXXXXXX..XXXXXXX 100644 | ||
370 | --- a/block/qed.c | ||
371 | +++ b/block/qed.c | ||
372 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn GRAPH_RDLOCK | ||
373 | qed_read_backing_file(BDRVQEDState *s, uint64_t pos, QEMUIOVector *qiov) | ||
374 | { | ||
375 | if (s->bs->backing) { | ||
376 | - BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO); | ||
377 | + BLKDBG_CO_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO); | ||
378 | return bdrv_co_preadv(s->bs->backing, pos, qiov->size, qiov, 0); | ||
379 | } | ||
380 | qemu_iovec_memset(qiov, 0, 0, qiov->size); | ||
381 | @@ -XXX,XX +XXX,XX @@ qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos, uint64_t len, | ||
382 | goto out; | ||
383 | } | ||
384 | |||
385 | - BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE); | ||
386 | + BLKDBG_CO_EVENT(s->bs->file, BLKDBG_COW_WRITE); | ||
387 | ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0); | ||
388 | if (ret < 0) { | ||
389 | goto out; | ||
390 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn GRAPH_RDLOCK qed_aio_write_main(QEDAIOCB *acb) | ||
391 | |||
392 | trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size); | ||
393 | |||
394 | - BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO); | ||
395 | + BLKDBG_CO_EVENT(s->bs->file, BLKDBG_WRITE_AIO); | ||
396 | return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size, | ||
397 | &acb->cur_qiov, 0); | ||
398 | } | ||
399 | @@ -XXX,XX +XXX,XX @@ qed_aio_read_data(void *opaque, int ret, uint64_t offset, size_t len) | ||
400 | } else if (ret != QED_CLUSTER_FOUND) { | ||
401 | r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov); | ||
402 | } else { | ||
403 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | ||
404 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); | ||
405 | r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size, | ||
406 | &acb->cur_qiov, 0); | ||
407 | } | ||
408 | diff --git a/block/raw-format.c b/block/raw-format.c | ||
409 | index XXXXXXX..XXXXXXX 100644 | ||
410 | --- a/block/raw-format.c | ||
411 | +++ b/block/raw-format.c | ||
412 | @@ -XXX,XX +XXX,XX @@ raw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | ||
417 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_AIO); | ||
418 | return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); | ||
419 | } | ||
420 | |||
421 | @@ -XXX,XX +XXX,XX @@ raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
422 | goto fail; | ||
423 | } | ||
424 | |||
425 | - BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
426 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_WRITE_AIO); | ||
427 | ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); | ||
428 | |||
429 | fail: | ||
430 | diff --git a/block/vmdk.c b/block/vmdk.c | ||
431 | index XXXXXXX..XXXXXXX 100644 | ||
432 | --- a/block/vmdk.c | ||
433 | +++ b/block/vmdk.c | ||
434 | @@ -XXX,XX +XXX,XX @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent, | ||
435 | if (skip_start_bytes > 0) { | ||
436 | if (copy_from_backing) { | ||
437 | /* qcow2 emits this on bs->file instead of bs->backing */ | ||
438 | - BLKDBG_EVENT(extent->file, BLKDBG_COW_READ); | ||
439 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_READ); | ||
440 | ret = bdrv_co_pread(bs->backing, offset, skip_start_bytes, | ||
441 | whole_grain, 0); | ||
442 | if (ret < 0) { | ||
443 | @@ -XXX,XX +XXX,XX @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent, | ||
444 | goto exit; | ||
445 | } | ||
446 | } | ||
447 | - BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE); | ||
448 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_WRITE); | ||
449 | ret = bdrv_co_pwrite(extent->file, cluster_offset, skip_start_bytes, | ||
450 | whole_grain, 0); | ||
451 | if (ret < 0) { | ||
452 | @@ -XXX,XX +XXX,XX @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent, | ||
453 | if (skip_end_bytes < cluster_bytes) { | ||
454 | if (copy_from_backing) { | ||
455 | /* qcow2 emits this on bs->file instead of bs->backing */ | ||
456 | - BLKDBG_EVENT(extent->file, BLKDBG_COW_READ); | ||
457 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_READ); | ||
458 | ret = bdrv_co_pread(bs->backing, offset + skip_end_bytes, | ||
459 | cluster_bytes - skip_end_bytes, | ||
460 | whole_grain + skip_end_bytes, 0); | ||
461 | @@ -XXX,XX +XXX,XX @@ get_whole_cluster(BlockDriverState *bs, VmdkExtent *extent, | ||
462 | goto exit; | ||
463 | } | ||
464 | } | ||
465 | - BLKDBG_EVENT(extent->file, BLKDBG_COW_WRITE); | ||
466 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_COW_WRITE); | ||
467 | ret = bdrv_co_pwrite(extent->file, cluster_offset + skip_end_bytes, | ||
468 | cluster_bytes - skip_end_bytes, | ||
469 | whole_grain + skip_end_bytes, 0); | ||
470 | @@ -XXX,XX +XXX,XX @@ vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data, uint32_t offset) | ||
471 | { | ||
472 | offset = cpu_to_le32(offset); | ||
473 | /* update L2 table */ | ||
474 | - BLKDBG_EVENT(extent->file, BLKDBG_L2_UPDATE); | ||
475 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_L2_UPDATE); | ||
476 | if (bdrv_co_pwrite(extent->file, | ||
477 | ((int64_t)m_data->l2_offset * 512) | ||
478 | + (m_data->l2_index * sizeof(offset)), | ||
479 | @@ -XXX,XX +XXX,XX @@ get_cluster_offset(BlockDriverState *bs, VmdkExtent *extent, | ||
480 | } | ||
481 | } | ||
482 | l2_table = (char *)extent->l2_cache + (min_index * l2_size_bytes); | ||
483 | - BLKDBG_EVENT(extent->file, BLKDBG_L2_LOAD); | ||
484 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_L2_LOAD); | ||
485 | if (bdrv_co_pread(extent->file, | ||
486 | (int64_t)l2_offset * 512, | ||
487 | l2_size_bytes, | ||
488 | @@ -XXX,XX +XXX,XX @@ vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
489 | n_bytes = buf_len + sizeof(VmdkGrainMarker); | ||
490 | qemu_iovec_init_buf(&local_qiov, data, n_bytes); | ||
491 | |||
492 | - BLKDBG_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED); | ||
493 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_WRITE_COMPRESSED); | ||
494 | } else { | ||
495 | qemu_iovec_init(&local_qiov, qiov->niov); | ||
496 | qemu_iovec_concat(&local_qiov, qiov, qiov_offset, n_bytes); | ||
497 | |||
498 | - BLKDBG_EVENT(extent->file, BLKDBG_WRITE_AIO); | ||
499 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_WRITE_AIO); | ||
500 | } | ||
501 | |||
502 | write_offset = cluster_offset + offset_in_cluster; | ||
503 | @@ -XXX,XX +XXX,XX @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
504 | |||
505 | |||
506 | if (!extent->compressed) { | ||
507 | - BLKDBG_EVENT(extent->file, BLKDBG_READ_AIO); | ||
508 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_READ_AIO); | ||
509 | ret = bdrv_co_preadv(extent->file, | ||
510 | cluster_offset + offset_in_cluster, bytes, | ||
511 | qiov, 0); | ||
512 | @@ -XXX,XX +XXX,XX @@ vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, | ||
513 | buf_bytes = cluster_bytes * 2; | ||
514 | cluster_buf = g_malloc(buf_bytes); | ||
515 | uncomp_buf = g_malloc(cluster_bytes); | ||
516 | - BLKDBG_EVENT(extent->file, BLKDBG_READ_COMPRESSED); | ||
517 | + BLKDBG_CO_EVENT(extent->file, BLKDBG_READ_COMPRESSED); | ||
518 | ret = bdrv_co_pread(extent->file, cluster_offset, buf_bytes, cluster_buf, | ||
519 | 0); | ||
520 | if (ret < 0) { | ||
521 | @@ -XXX,XX +XXX,XX @@ vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
522 | qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes); | ||
523 | |||
524 | /* qcow2 emits this on bs->file instead of bs->backing */ | ||
525 | - BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
526 | + BLKDBG_CO_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | ||
527 | ret = bdrv_co_preadv(bs->backing, offset, n_bytes, | ||
528 | &local_qiov, 0); | ||
529 | if (ret < 0) { | ||
530 | @@ -XXX,XX +XXX,XX @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix) | ||
531 | BDRVVmdkState *s = bs->opaque; | ||
532 | VmdkExtent *extent = NULL; | ||
533 | int64_t sector_num = 0; | ||
534 | - int64_t total_sectors = bdrv_nb_sectors(bs); | ||
535 | + int64_t total_sectors = bdrv_co_nb_sectors(bs); | ||
536 | int ret; | ||
537 | uint64_t cluster_offset; | ||
59 | 538 | ||
60 | -- | 539 | -- |
61 | 2.38.1 | 540 | 2.41.0 |