1
The following changes since commit 8c1ecb590497b0349c550607db923972b37f6963:
1
The following changes since commit 9cf289af47bcfae5c75de37d8e5d6fd23705322c:
2
2
3
Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-next-280519-2' into staging (2019-05-28 17:38:32 +0100)
3
Merge tag 'qga-pull-request' of gitlab.com:marcandre.lureau/qemu into staging (2022-05-04 03:42:49 -0700)
4
4
5
are available in the Git repository at:
5
are available in the Git repository at:
6
6
7
https://github.com/XanClic/qemu.git tags/pull-block-2019-05-28
7
https://gitlab.com/stefanha/qemu.git tags/block-pull-request
8
8
9
for you to fetch changes up to a2d665c1bc3624a8375e2f9a7d569f7565cc1358:
9
for you to fetch changes up to bef2e050d6a7feb865854c65570c496ac5a8cf53:
10
10
11
blockdev: loosen restrictions on drive-backup source node (2019-05-28 20:30:55 +0200)
11
util/event-loop-base: Introduce options to set the thread pool size (2022-05-04 17:02:19 +0100)
12
12
13
----------------------------------------------------------------
13
----------------------------------------------------------------
14
Block patches:
14
Pull request
15
- qcow2: Use threads for encrypted I/O
15
16
- qemu-img rebase: Optimizations
16
Add new thread-pool-min/thread-pool-max parameters to control the thread pool
17
- backup job: Allow any source node, and some refactoring
17
used for async I/O.
18
- Some general simplifications in the block layer
19
18
20
----------------------------------------------------------------
19
----------------------------------------------------------------
21
Alberto Garcia (2):
22
block: Use bdrv_unref_child() for all children in bdrv_close()
23
block: Make bdrv_root_attach_child() unref child_bs on failure
24
20
25
Andrey Shinkevich (1):
21
Nicolas Saenz Julienne (3):
26
qcow2-bitmap: initialize bitmap directory alignment
22
Introduce event-loop-base abstract class
23
util/main-loop: Introduce the main loop into QOM
24
util/event-loop-base: Introduce options to set the thread pool size
27
25
28
Anton Nefedov (1):
26
qapi/qom.json | 43 ++++++++--
29
qcow2: skip writing zero buffers to empty COW areas
27
meson.build | 26 +++---
30
28
include/block/aio.h | 10 +++
31
John Snow (1):
29
include/block/thread-pool.h | 3 +
32
blockdev: loosen restrictions on drive-backup source node
30
include/qemu/main-loop.h | 10 +++
33
31
include/sysemu/event-loop-base.h | 41 +++++++++
34
Sam Eiderman (3):
32
include/sysemu/iothread.h | 6 +-
35
qemu-img: rebase: Reuse parent BlockDriverState
33
event-loop-base.c | 140 +++++++++++++++++++++++++++++++
36
qemu-img: rebase: Reduce reads on in-chain rebase
34
iothread.c | 68 +++++----------
37
qemu-img: rebase: Reuse in-chain BlockDriverState
35
util/aio-posix.c | 1 +
38
36
util/async.c | 20 +++++
39
Vladimir Sementsov-Ogievskiy (13):
37
util/main-loop.c | 65 ++++++++++++++
40
qcow2.h: add missing include
38
util/thread-pool.c | 55 +++++++++++-
41
qcow2: add separate file for threaded data processing functions
39
13 files changed, 419 insertions(+), 69 deletions(-)
42
qcow2-threads: use thread_pool_submit_co
40
create mode 100644 include/sysemu/event-loop-base.h
43
qcow2-threads: qcow2_co_do_compress: protect queuing by mutex
41
create mode 100644 event-loop-base.c
44
qcow2-threads: split out generic path
45
qcow2: qcow2_co_preadv: improve locking
46
qcow2: bdrv_co_pwritev: move encryption code out of the lock
47
qcow2: do encryption in threads
48
block/backup: simplify backup_incremental_init_copy_bitmap
49
block/backup: move to copy_bitmap with granularity
50
block/backup: refactor and tolerate unallocated cluster skipping
51
block/backup: unify different modes code path
52
block/backup: refactor: split out backup_calculate_cluster_size
53
54
block/Makefile.objs | 2 +-
55
qapi/block-core.json | 4 +-
56
block/qcow2.h | 26 ++-
57
block.c | 46 +++---
58
block/backup.c | 243 ++++++++++++---------------
59
block/block-backend.c | 3 +-
60
block/qcow2-bitmap.c | 3 +-
61
block/qcow2-cache.c | 1 -
62
block/qcow2-cluster.c | 10 +-
63
block/qcow2-refcount.c | 1 -
64
block/qcow2-snapshot.c | 1 -
65
block/qcow2-threads.c | 268 ++++++++++++++++++++++++++++++
66
block/qcow2.c | 320 +++++++++++++-----------------------
67
block/quorum.c | 1 -
68
blockdev.c | 7 +-
69
blockjob.c | 2 +-
70
qemu-img.c | 85 ++++++----
71
tests/test-bdrv-drain.c | 6 -
72
tests/test-bdrv-graph-mod.c | 1 -
73
block/trace-events | 1 +
74
tests/qemu-iotests/056 | 2 +-
75
tests/qemu-iotests/060 | 7 +-
76
tests/qemu-iotests/060.out | 5 +-
77
23 files changed, 615 insertions(+), 430 deletions(-)
78
create mode 100644 block/qcow2-threads.c
79
42
80
--
43
--
81
2.21.0
44
2.35.1
82
83
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
qcow2.h depends on block_int.h. Compilation isn't broken currently only
4
due to block_int.h always included before qcow2.h. Though, it seems
5
better to directly include block_int.h in qcow2.h.
6
7
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Reviewed-by: Alberto Garcia <berto@igalia.com>
9
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
Message-id: 20190506142741.41731-2-vsementsov@virtuozzo.com
11
Signed-off-by: Max Reitz <mreitz@redhat.com>
12
---
13
block/qcow2.h | 1 +
14
block/qcow2-bitmap.c | 1 -
15
block/qcow2-cache.c | 1 -
16
block/qcow2-cluster.c | 1 -
17
block/qcow2-refcount.c | 1 -
18
block/qcow2-snapshot.c | 1 -
19
block/qcow2.c | 1 -
20
7 files changed, 1 insertion(+), 6 deletions(-)
21
22
diff --git a/block/qcow2.h b/block/qcow2.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/block/qcow2.h
25
+++ b/block/qcow2.h
26
@@ -XXX,XX +XXX,XX @@
27
#include "crypto/block.h"
28
#include "qemu/coroutine.h"
29
#include "qemu/units.h"
30
+#include "block/block_int.h"
31
32
//#define DEBUG_ALLOC
33
//#define DEBUG_ALLOC2
34
diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/block/qcow2-bitmap.c
37
+++ b/block/qcow2-bitmap.c
38
@@ -XXX,XX +XXX,XX @@
39
#include "qapi/error.h"
40
#include "qemu/cutils.h"
41
42
-#include "block/block_int.h"
43
#include "qcow2.h"
44
45
/* NOTICE: BME here means Bitmaps Extension and used as a namespace for
46
diff --git a/block/qcow2-cache.c b/block/qcow2-cache.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/block/qcow2-cache.c
49
+++ b/block/qcow2-cache.c
50
@@ -XXX,XX +XXX,XX @@
51
*/
52
53
#include "qemu/osdep.h"
54
-#include "block/block_int.h"
55
#include "qemu-common.h"
56
#include "qcow2.h"
57
#include "trace.h"
58
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/block/qcow2-cluster.c
61
+++ b/block/qcow2-cluster.c
62
@@ -XXX,XX +XXX,XX @@
63
64
#include "qapi/error.h"
65
#include "qemu-common.h"
66
-#include "block/block_int.h"
67
#include "qcow2.h"
68
#include "qemu/bswap.h"
69
#include "trace.h"
70
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/qcow2-refcount.c
73
+++ b/block/qcow2-refcount.c
74
@@ -XXX,XX +XXX,XX @@
75
#include "qemu/osdep.h"
76
#include "qapi/error.h"
77
#include "qemu-common.h"
78
-#include "block/block_int.h"
79
#include "qcow2.h"
80
#include "qemu/range.h"
81
#include "qemu/bswap.h"
82
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/block/qcow2-snapshot.c
85
+++ b/block/qcow2-snapshot.c
86
@@ -XXX,XX +XXX,XX @@
87
88
#include "qemu/osdep.h"
89
#include "qapi/error.h"
90
-#include "block/block_int.h"
91
#include "qcow2.h"
92
#include "qemu/bswap.h"
93
#include "qemu/error-report.h"
94
diff --git a/block/qcow2.c b/block/qcow2.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/block/qcow2.c
97
+++ b/block/qcow2.c
98
@@ -XXX,XX +XXX,XX @@
99
#define ZLIB_CONST
100
#include <zlib.h>
101
102
-#include "block/block_int.h"
103
#include "block/qdict.h"
104
#include "sysemu/block-backend.h"
105
#include "qemu/module.h"
106
--
107
2.21.0
108
109
diff view generated by jsdifflib
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>
2
2
3
Move compression-on-threads to separate file. Encryption will be in it
3
Introduce the 'event-loop-base' abstract class, it'll hold the
4
too.
4
properties common to all event loops and provide the necessary hooks for
5
5
their creation and maintenance. Then have iothread inherit from it.
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
7
Reviewed-by: Alberto Garcia <berto@igalia.com>
7
EventLoopBaseClass is defined as user creatable and provides a hook for
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
its children to attach themselves to the user creatable class 'complete'
9
Message-id: 20190506142741.41731-3-vsementsov@virtuozzo.com
9
function. It also provides an update_params() callback to propagate
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
property changes onto its children.
11
12
The new 'event-loop-base' class will live in the root directory. It is
13
built on its own using the 'link_whole' option (there are no direct
14
function dependencies between the class and its children, it all happens
15
trough 'constructor' magic). And also imposes new compilation
16
dependencies:
17
18
qom <- event-loop-base <- blockdev (iothread.c)
19
20
And in subsequent patches:
21
22
qom <- event-loop-base <- qemuutil (util/main-loop.c)
23
24
All this forced some amount of reordering in meson.build:
25
26
- Moved qom build definition before qemuutil. Doing it the other way
27
around (i.e. moving qemuutil after qom) isn't possible as a lot of
28
core libraries that live in between the two depend on it.
29
30
- Process the 'hw' subdir earlier, as it introduces files into the
31
'qom' source set.
32
33
No functional changes intended.
34
35
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
36
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
37
Acked-by: Markus Armbruster <armbru@redhat.com>
38
Message-id: 20220425075723.20019-2-nsaenzju@redhat.com
39
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
40
---
12
block/Makefile.objs | 2 +-
41
qapi/qom.json | 22 +++++--
13
block/qcow2.h | 7 ++
42
meson.build | 23 ++++---
14
block/qcow2-threads.c | 201 ++++++++++++++++++++++++++++++++++++++++++
43
include/sysemu/event-loop-base.h | 36 +++++++++++
15
block/qcow2.c | 169 -----------------------------------
44
include/sysemu/iothread.h | 6 +-
16
4 files changed, 209 insertions(+), 170 deletions(-)
45
event-loop-base.c | 104 +++++++++++++++++++++++++++++++
17
create mode 100644 block/qcow2-threads.c
46
iothread.c | 65 ++++++-------------
18
47
6 files changed, 192 insertions(+), 64 deletions(-)
19
diff --git a/block/Makefile.objs b/block/Makefile.objs
48
create mode 100644 include/sysemu/event-loop-base.h
49
create mode 100644 event-loop-base.c
50
51
diff --git a/qapi/qom.json b/qapi/qom.json
20
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
21
--- a/block/Makefile.objs
53
--- a/qapi/qom.json
22
+++ b/block/Makefile.objs
54
+++ b/qapi/qom.json
23
@@ -XXX,XX +XXX,XX @@ block-obj-$(CONFIG_BOCHS) += bochs.o
55
@@ -XXX,XX +XXX,XX @@
24
block-obj-$(CONFIG_VVFAT) += vvfat.o
56
'*repeat': 'bool',
25
block-obj-$(CONFIG_DMG) += dmg.o
57
'*grab-toggle': 'GrabToggleKeys' } }
26
58
27
-block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o qcow2-bitmap.o
59
+##
28
+block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o qcow2-bitmap.o qcow2-threads.o
60
+# @EventLoopBaseProperties:
29
block-obj-$(CONFIG_QED) += qed.o qed-l2-cache.o qed-table.o qed-cluster.o
61
+#
30
block-obj-$(CONFIG_QED) += qed-check.o
62
+# Common properties for event loops
31
block-obj-y += vhdx.o vhdx-endian.o vhdx-log.o
63
+#
32
diff --git a/block/qcow2.h b/block/qcow2.h
64
+# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
65
+# 0 means that the engine will use its default.
66
+# (default: 0)
67
+#
68
+# Since: 7.1
69
+##
70
+{ 'struct': 'EventLoopBaseProperties',
71
+ 'data': { '*aio-max-batch': 'int' } }
72
+
73
##
74
# @IothreadProperties:
75
#
76
@@ -XXX,XX +XXX,XX @@
77
# algorithm detects it is spending too long polling without
78
# encountering events. 0 selects a default behaviour (default: 0)
79
#
80
-# @aio-max-batch: maximum number of requests in a batch for the AIO engine,
81
-# 0 means that the engine will use its default
82
-# (default:0, since 6.1)
83
+# The @aio-max-batch option is available since 6.1.
84
#
85
# Since: 2.0
86
##
87
{ 'struct': 'IothreadProperties',
88
+ 'base': 'EventLoopBaseProperties',
89
'data': { '*poll-max-ns': 'int',
90
'*poll-grow': 'int',
91
- '*poll-shrink': 'int',
92
- '*aio-max-batch': 'int' } }
93
+ '*poll-shrink': 'int' } }
94
95
##
96
# @MemoryBackendProperties:
97
diff --git a/meson.build b/meson.build
33
index XXXXXXX..XXXXXXX 100644
98
index XXXXXXX..XXXXXXX 100644
34
--- a/block/qcow2.h
99
--- a/meson.build
35
+++ b/block/qcow2.h
100
+++ b/meson.build
36
@@ -XXX,XX +XXX,XX @@ void qcow2_remove_persistent_dirty_bitmap(BlockDriverState *bs,
101
@@ -XXX,XX +XXX,XX @@ subdir('qom')
37
const char *name,
102
subdir('authz')
38
Error **errp);
103
subdir('crypto')
39
104
subdir('ui')
40
+ssize_t coroutine_fn
105
+subdir('hw')
41
+qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size,
106
42
+ const void *src, size_t src_size);
107
43
+ssize_t coroutine_fn
108
if enable_modules
44
+qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
109
@@ -XXX,XX +XXX,XX @@ if enable_modules
45
+ const void *src, size_t src_size);
110
modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO')
46
+
111
endif
47
#endif
112
48
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
113
+qom_ss = qom_ss.apply(config_host, strict: false)
114
+libqom = static_library('qom', qom_ss.sources() + genh,
115
+ dependencies: [qom_ss.dependencies()],
116
+ name_suffix: 'fa')
117
+qom = declare_dependency(link_whole: libqom)
118
+
119
+event_loop_base = files('event-loop-base.c')
120
+event_loop_base = static_library('event-loop-base', sources: event_loop_base + genh,
121
+ build_by_default: true)
122
+event_loop_base = declare_dependency(link_whole: event_loop_base,
123
+ dependencies: [qom])
124
+
125
stub_ss = stub_ss.apply(config_all, strict: false)
126
127
util_ss.add_all(trace_ss)
128
@@ -XXX,XX +XXX,XX @@ subdir('monitor')
129
subdir('net')
130
subdir('replay')
131
subdir('semihosting')
132
-subdir('hw')
133
subdir('tcg')
134
subdir('fpu')
135
subdir('accel')
136
@@ -XXX,XX +XXX,XX @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms',
137
capture: true,
138
command: [undefsym, nm, '@INPUT@'])
139
140
-qom_ss = qom_ss.apply(config_host, strict: false)
141
-libqom = static_library('qom', qom_ss.sources() + genh,
142
- dependencies: [qom_ss.dependencies()],
143
- name_suffix: 'fa')
144
-
145
-qom = declare_dependency(link_whole: libqom)
146
-
147
authz_ss = authz_ss.apply(config_host, strict: false)
148
libauthz = static_library('authz', authz_ss.sources() + genh,
149
dependencies: [authz_ss.dependencies()],
150
@@ -XXX,XX +XXX,XX @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh,
151
build_by_default: false)
152
153
blockdev = declare_dependency(link_whole: [libblockdev],
154
- dependencies: [block])
155
+ dependencies: [block, event_loop_base])
156
157
qmp_ss = qmp_ss.apply(config_host, strict: false)
158
libqmp = static_library('qmp', qmp_ss.sources() + genh,
159
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
49
new file mode 100644
160
new file mode 100644
50
index XXXXXXX..XXXXXXX
161
index XXXXXXX..XXXXXXX
51
--- /dev/null
162
--- /dev/null
52
+++ b/block/qcow2-threads.c
163
+++ b/include/sysemu/event-loop-base.h
53
@@ -XXX,XX +XXX,XX @@
164
@@ -XXX,XX +XXX,XX @@
54
+/*
165
+/*
55
+ * Threaded data processing for Qcow2: compression, encryption
166
+ * QEMU event-loop backend
56
+ *
167
+ *
57
+ * Copyright (c) 2004-2006 Fabrice Bellard
168
+ * Copyright (C) 2022 Red Hat Inc
58
+ * Copyright (c) 2018 Virtuozzo International GmbH. All rights reserved.
169
+ *
59
+ *
170
+ * Authors:
60
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
171
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
61
+ * of this software and associated documentation files (the "Software"), to deal
172
+ *
62
+ * in the Software without restriction, including without limitation the rights
173
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
63
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
174
+ * See the COPYING file in the top-level directory.
64
+ * copies of the Software, and to permit persons to whom the Software is
65
+ * furnished to do so, subject to the following conditions:
66
+ *
67
+ * The above copyright notice and this permission notice shall be included in
68
+ * all copies or substantial portions of the Software.
69
+ *
70
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
71
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
72
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
73
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
74
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
75
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
76
+ * THE SOFTWARE.
77
+ */
175
+ */
176
+#ifndef QEMU_EVENT_LOOP_BASE_H
177
+#define QEMU_EVENT_LOOP_BASE_H
178
+
179
+#include "qom/object.h"
180
+#include "block/aio.h"
181
+#include "qemu/typedefs.h"
182
+
183
+#define TYPE_EVENT_LOOP_BASE "event-loop-base"
184
+OBJECT_DECLARE_TYPE(EventLoopBase, EventLoopBaseClass,
185
+ EVENT_LOOP_BASE)
186
+
187
+struct EventLoopBaseClass {
188
+ ObjectClass parent_class;
189
+
190
+ void (*init)(EventLoopBase *base, Error **errp);
191
+ void (*update_params)(EventLoopBase *base, Error **errp);
192
+};
193
+
194
+struct EventLoopBase {
195
+ Object parent;
196
+
197
+ /* AioContext AIO engine parameters */
198
+ int64_t aio_max_batch;
199
+};
200
+#endif
201
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
202
index XXXXXXX..XXXXXXX 100644
203
--- a/include/sysemu/iothread.h
204
+++ b/include/sysemu/iothread.h
205
@@ -XXX,XX +XXX,XX @@
206
#include "block/aio.h"
207
#include "qemu/thread.h"
208
#include "qom/object.h"
209
+#include "sysemu/event-loop-base.h"
210
211
#define TYPE_IOTHREAD "iothread"
212
213
struct IOThread {
214
- Object parent_obj;
215
+ EventLoopBase parent_obj;
216
217
QemuThread thread;
218
AioContext *ctx;
219
@@ -XXX,XX +XXX,XX @@ struct IOThread {
220
int64_t poll_max_ns;
221
int64_t poll_grow;
222
int64_t poll_shrink;
223
-
224
- /* AioContext AIO engine parameters */
225
- int64_t aio_max_batch;
226
};
227
typedef struct IOThread IOThread;
228
229
diff --git a/event-loop-base.c b/event-loop-base.c
230
new file mode 100644
231
index XXXXXXX..XXXXXXX
232
--- /dev/null
233
+++ b/event-loop-base.c
234
@@ -XXX,XX +XXX,XX @@
235
+/*
236
+ * QEMU event-loop base
237
+ *
238
+ * Copyright (C) 2022 Red Hat Inc
239
+ *
240
+ * Authors:
241
+ * Stefan Hajnoczi <stefanha@redhat.com>
242
+ * Nicolas Saenz Julienne <nsaenzju@redhat.com>
243
+ *
244
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
245
+ * See the COPYING file in the top-level directory.
246
+ */
78
+
247
+
79
+#include "qemu/osdep.h"
248
+#include "qemu/osdep.h"
80
+
249
+#include "qom/object_interfaces.h"
81
+#define ZLIB_CONST
250
+#include "qapi/error.h"
82
+#include <zlib.h>
251
+#include "sysemu/event-loop-base.h"
83
+
252
+
84
+#include "qcow2.h"
253
+typedef struct {
85
+#include "block/thread-pool.h"
254
+ const char *name;
86
+
255
+ ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
87
+#define MAX_COMPRESS_THREADS 4
256
+} EventLoopBaseParamInfo;
88
+
257
+
89
+typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size,
258
+static EventLoopBaseParamInfo aio_max_batch_info = {
90
+ const void *src, size_t src_size);
259
+ "aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
91
+typedef struct Qcow2CompressData {
260
+};
92
+ void *dest;
261
+
93
+ size_t dest_size;
262
+static void event_loop_base_get_param(Object *obj, Visitor *v,
94
+ const void *src;
263
+ const char *name, void *opaque, Error **errp)
95
+ size_t src_size;
96
+ ssize_t ret;
97
+
98
+ Qcow2CompressFunc func;
99
+} Qcow2CompressData;
100
+
101
+/*
102
+ * qcow2_compress()
103
+ *
104
+ * @dest - destination buffer, @dest_size bytes
105
+ * @src - source buffer, @src_size bytes
106
+ *
107
+ * Returns: compressed size on success
108
+ * -ENOMEM destination buffer is not enough to store compressed data
109
+ * -EIO on any other error
110
+ */
111
+static ssize_t qcow2_compress(void *dest, size_t dest_size,
112
+ const void *src, size_t src_size)
113
+{
264
+{
114
+ ssize_t ret;
265
+ EventLoopBase *event_loop_base = EVENT_LOOP_BASE(obj);
115
+ z_stream strm;
266
+ EventLoopBaseParamInfo *info = opaque;
116
+
267
+ int64_t *field = (void *)event_loop_base + info->offset;
117
+ /* best compression, small window, no zlib header */
268
+
118
+ memset(&strm, 0, sizeof(strm));
269
+ visit_type_int64(v, name, field, errp);
119
+ ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
120
+ -12, 9, Z_DEFAULT_STRATEGY);
121
+ if (ret != Z_OK) {
122
+ return -EIO;
123
+ }
124
+
125
+ /*
126
+ * strm.next_in is not const in old zlib versions, such as those used on
127
+ * OpenBSD/NetBSD, so cast the const away
128
+ */
129
+ strm.avail_in = src_size;
130
+ strm.next_in = (void *) src;
131
+ strm.avail_out = dest_size;
132
+ strm.next_out = dest;
133
+
134
+ ret = deflate(&strm, Z_FINISH);
135
+ if (ret == Z_STREAM_END) {
136
+ ret = dest_size - strm.avail_out;
137
+ } else {
138
+ ret = (ret == Z_OK ? -ENOMEM : -EIO);
139
+ }
140
+
141
+ deflateEnd(&strm);
142
+
143
+ return ret;
144
+}
270
+}
145
+
271
+
146
+/*
272
+static void event_loop_base_set_param(Object *obj, Visitor *v,
147
+ * qcow2_decompress()
273
+ const char *name, void *opaque, Error **errp)
148
+ *
149
+ * Decompress some data (not more than @src_size bytes) to produce exactly
150
+ * @dest_size bytes.
151
+ *
152
+ * @dest - destination buffer, @dest_size bytes
153
+ * @src - source buffer, @src_size bytes
154
+ *
155
+ * Returns: 0 on success
156
+ * -1 on fail
157
+ */
158
+static ssize_t qcow2_decompress(void *dest, size_t dest_size,
159
+ const void *src, size_t src_size)
160
+{
274
+{
161
+ int ret = 0;
275
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(obj);
162
+ z_stream strm;
276
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
163
+
277
+ EventLoopBaseParamInfo *info = opaque;
164
+ memset(&strm, 0, sizeof(strm));
278
+ int64_t *field = (void *)base + info->offset;
165
+ strm.avail_in = src_size;
279
+ int64_t value;
166
+ strm.next_in = (void *) src;
280
+
167
+ strm.avail_out = dest_size;
281
+ if (!visit_type_int64(v, name, &value, errp)) {
168
+ strm.next_out = dest;
282
+ return;
169
+
283
+ }
170
+ ret = inflateInit2(&strm, -12);
284
+
171
+ if (ret != Z_OK) {
285
+ if (value < 0) {
172
+ return -1;
286
+ error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
173
+ }
287
+ info->name, INT64_MAX);
174
+
288
+ return;
175
+ ret = inflate(&strm, Z_FINISH);
289
+ }
176
+ if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) {
290
+
177
+ /*
291
+ *field = value;
178
+ * We approve Z_BUF_ERROR because we need @dest buffer to be filled, but
292
+
179
+ * @src buffer may be processed partly (because in qcow2 we know size of
293
+ if (bc->update_params) {
180
+ * compressed data with precision of one sector)
294
+ bc->update_params(base, errp);
181
+ */
295
+ }
182
+ ret = -1;
296
+
183
+ }
297
+ return;
184
+
185
+ inflateEnd(&strm);
186
+
187
+ return ret;
188
+}
298
+}
189
+
299
+
190
+static int qcow2_compress_pool_func(void *opaque)
300
+static void event_loop_base_complete(UserCreatable *uc, Error **errp)
191
+{
301
+{
192
+ Qcow2CompressData *data = opaque;
302
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
193
+
303
+ EventLoopBase *base = EVENT_LOOP_BASE(uc);
194
+ data->ret = data->func(data->dest, data->dest_size,
304
+
195
+ data->src, data->src_size);
305
+ if (bc->init) {
196
+
306
+ bc->init(base, errp);
197
+ return 0;
307
+ }
198
+}
308
+}
199
+
309
+
200
+static void qcow2_compress_complete(void *opaque, int ret)
310
+static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
201
+{
311
+{
202
+ qemu_coroutine_enter(opaque);
312
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
313
+ ucc->complete = event_loop_base_complete;
314
+
315
+ object_class_property_add(klass, "aio-max-batch", "int",
316
+ event_loop_base_get_param,
317
+ event_loop_base_set_param,
318
+ NULL, &aio_max_batch_info);
203
+}
319
+}
204
+
320
+
205
+static ssize_t coroutine_fn
321
+static const TypeInfo event_loop_base_info = {
206
+qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
322
+ .name = TYPE_EVENT_LOOP_BASE,
207
+ const void *src, size_t src_size, Qcow2CompressFunc func)
323
+ .parent = TYPE_OBJECT,
324
+ .instance_size = sizeof(EventLoopBase),
325
+ .class_size = sizeof(EventLoopBaseClass),
326
+ .class_init = event_loop_base_class_init,
327
+ .abstract = true,
328
+ .interfaces = (InterfaceInfo[]) {
329
+ { TYPE_USER_CREATABLE },
330
+ { }
331
+ }
332
+};
333
+
334
+static void register_types(void)
208
+{
335
+{
209
+ BDRVQcow2State *s = bs->opaque;
336
+ type_register_static(&event_loop_base_info);
210
+ BlockAIOCB *acb;
211
+ ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
212
+ Qcow2CompressData arg = {
213
+ .dest = dest,
214
+ .dest_size = dest_size,
215
+ .src = src,
216
+ .src_size = src_size,
217
+ .func = func,
218
+ };
219
+
220
+ while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) {
221
+ qemu_co_queue_wait(&s->compress_wait_queue, NULL);
222
+ }
223
+
224
+ s->nb_compress_threads++;
225
+ acb = thread_pool_submit_aio(pool, qcow2_compress_pool_func, &arg,
226
+ qcow2_compress_complete,
227
+ qemu_coroutine_self());
228
+
229
+ if (!acb) {
230
+ s->nb_compress_threads--;
231
+ return -EINVAL;
232
+ }
233
+ qemu_coroutine_yield();
234
+ s->nb_compress_threads--;
235
+ qemu_co_queue_next(&s->compress_wait_queue);
236
+
237
+ return arg.ret;
238
+}
337
+}
239
+
338
+type_init(register_types);
240
+ssize_t coroutine_fn
339
diff --git a/iothread.c b/iothread.c
241
+qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size,
242
+ const void *src, size_t src_size)
243
+{
244
+ return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
245
+ qcow2_compress);
246
+}
247
+
248
+ssize_t coroutine_fn
249
+qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
250
+ const void *src, size_t src_size)
251
+{
252
+ return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
253
+ qcow2_decompress);
254
+}
255
diff --git a/block/qcow2.c b/block/qcow2.c
256
index XXXXXXX..XXXXXXX 100644
340
index XXXXXXX..XXXXXXX 100644
257
--- a/block/qcow2.c
341
--- a/iothread.c
258
+++ b/block/qcow2.c
342
+++ b/iothread.c
259
@@ -XXX,XX +XXX,XX @@
343
@@ -XXX,XX +XXX,XX @@
260
261
#include "qemu/osdep.h"
262
263
-#define ZLIB_CONST
264
-#include <zlib.h>
265
-
266
#include "block/qdict.h"
267
#include "sysemu/block-backend.h"
268
#include "qemu/module.h"
344
#include "qemu/module.h"
269
@@ -XXX,XX +XXX,XX @@
345
#include "block/aio.h"
270
#include "qapi/qobject-input-visitor.h"
346
#include "block/block.h"
271
#include "qapi/qapi-visit-block-core.h"
347
+#include "sysemu/event-loop-base.h"
272
#include "crypto.h"
348
#include "sysemu/iothread.h"
273
-#include "block/thread-pool.h"
349
#include "qapi/error.h"
274
350
#include "qapi/qapi-commands-misc.h"
275
/*
351
@@ -XXX,XX +XXX,XX @@ static void iothread_init_gcontext(IOThread *iothread)
276
Differences with QCOW:
352
iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
277
@@ -XXX,XX +XXX,XX @@ fail:
278
return ret;
279
}
353
}
280
354
281
-/*
355
-static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
282
- * qcow2_compress()
356
+static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
283
- *
357
{
284
- * @dest - destination buffer, @dest_size bytes
358
+ IOThread *iothread = IOTHREAD(base);
285
- * @src - source buffer, @src_size bytes
359
ERRP_GUARD();
286
- *
360
287
- * Returns: compressed size on success
361
+ if (!iothread->ctx) {
288
- * -ENOMEM destination buffer is not enough to store compressed data
362
+ return;
289
- * -EIO on any other error
363
+ }
290
- */
364
+
291
-static ssize_t qcow2_compress(void *dest, size_t dest_size,
365
aio_context_set_poll_params(iothread->ctx,
292
- const void *src, size_t src_size)
366
iothread->poll_max_ns,
367
iothread->poll_grow,
368
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(IOThread *iothread, Error **errp)
369
}
370
371
aio_context_set_aio_params(iothread->ctx,
372
- iothread->aio_max_batch,
373
+ iothread->parent_obj.aio_max_batch,
374
errp);
375
}
376
377
-static void iothread_complete(UserCreatable *obj, Error **errp)
378
+
379
+static void iothread_init(EventLoopBase *base, Error **errp)
380
{
381
Error *local_error = NULL;
382
- IOThread *iothread = IOTHREAD(obj);
383
+ IOThread *iothread = IOTHREAD(base);
384
char *thread_name;
385
386
iothread->stopping = false;
387
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
388
*/
389
iothread_init_gcontext(iothread);
390
391
- iothread_set_aio_context_params(iothread, &local_error);
392
+ iothread_set_aio_context_params(base, &local_error);
393
if (local_error) {
394
error_propagate(errp, local_error);
395
aio_context_unref(iothread->ctx);
396
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
397
* to inherit.
398
*/
399
thread_name = g_strdup_printf("IO %s",
400
- object_get_canonical_path_component(OBJECT(obj)));
401
+ object_get_canonical_path_component(OBJECT(base)));
402
qemu_thread_create(&iothread->thread, thread_name, iothread_run,
403
iothread, QEMU_THREAD_JOINABLE);
404
g_free(thread_name);
405
@@ -XXX,XX +XXX,XX @@ static IOThreadParamInfo poll_grow_info = {
406
static IOThreadParamInfo poll_shrink_info = {
407
"poll-shrink", offsetof(IOThread, poll_shrink),
408
};
409
-static IOThreadParamInfo aio_max_batch_info = {
410
- "aio-max-batch", offsetof(IOThread, aio_max_batch),
411
-};
412
413
static void iothread_get_param(Object *obj, Visitor *v,
414
const char *name, IOThreadParamInfo *info, Error **errp)
415
@@ -XXX,XX +XXX,XX @@ static void iothread_set_poll_param(Object *obj, Visitor *v,
416
}
417
}
418
419
-static void iothread_get_aio_param(Object *obj, Visitor *v,
420
- const char *name, void *opaque, Error **errp)
293
-{
421
-{
294
- ssize_t ret;
422
- IOThreadParamInfo *info = opaque;
295
- z_stream strm;
423
-
296
-
424
- iothread_get_param(obj, v, name, info, errp);
297
- /* best compression, small window, no zlib header */
425
-}
298
- memset(&strm, 0, sizeof(strm));
426
-
299
- ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
427
-static void iothread_set_aio_param(Object *obj, Visitor *v,
300
- -12, 9, Z_DEFAULT_STRATEGY);
428
- const char *name, void *opaque, Error **errp)
301
- if (ret != Z_OK) {
429
-{
302
- return -EIO;
430
- IOThread *iothread = IOTHREAD(obj);
431
- IOThreadParamInfo *info = opaque;
432
-
433
- if (!iothread_set_param(obj, v, name, info, errp)) {
434
- return;
303
- }
435
- }
304
-
436
-
305
- /* strm.next_in is not const in old zlib versions, such as those used on
437
- if (iothread->ctx) {
306
- * OpenBSD/NetBSD, so cast the const away */
438
- aio_context_set_aio_params(iothread->ctx,
307
- strm.avail_in = src_size;
439
- iothread->aio_max_batch,
308
- strm.next_in = (void *) src;
440
- errp);
309
- strm.avail_out = dest_size;
310
- strm.next_out = dest;
311
-
312
- ret = deflate(&strm, Z_FINISH);
313
- if (ret == Z_STREAM_END) {
314
- ret = dest_size - strm.avail_out;
315
- } else {
316
- ret = (ret == Z_OK ? -ENOMEM : -EIO);
317
- }
441
- }
318
-
319
- deflateEnd(&strm);
320
-
321
- return ret;
322
-}
442
-}
323
-
443
-
324
-/*
444
static void iothread_class_init(ObjectClass *klass, void *class_data)
325
- * qcow2_decompress()
445
{
326
- *
446
- UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
327
- * Decompress some data (not more than @src_size bytes) to produce exactly
447
- ucc->complete = iothread_complete;
328
- * @dest_size bytes.
448
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);
329
- *
449
+
330
- * @dest - destination buffer, @dest_size bytes
450
+ bc->init = iothread_init;
331
- * @src - source buffer, @src_size bytes
451
+ bc->update_params = iothread_set_aio_context_params;
332
- *
452
333
- * Returns: 0 on success
453
object_class_property_add(klass, "poll-max-ns", "int",
334
- * -1 on fail
454
iothread_get_poll_param,
335
- */
455
@@ -XXX,XX +XXX,XX @@ static void iothread_class_init(ObjectClass *klass, void *class_data)
336
-static ssize_t qcow2_decompress(void *dest, size_t dest_size,
456
iothread_get_poll_param,
337
- const void *src, size_t src_size)
457
iothread_set_poll_param,
338
-{
458
NULL, &poll_shrink_info);
339
- int ret = 0;
459
- object_class_property_add(klass, "aio-max-batch", "int",
340
- z_stream strm;
460
- iothread_get_aio_param,
341
-
461
- iothread_set_aio_param,
342
- memset(&strm, 0, sizeof(strm));
462
- NULL, &aio_max_batch_info);
343
- strm.avail_in = src_size;
463
}
344
- strm.next_in = (void *) src;
464
345
- strm.avail_out = dest_size;
465
static const TypeInfo iothread_info = {
346
- strm.next_out = dest;
466
.name = TYPE_IOTHREAD,
347
-
467
- .parent = TYPE_OBJECT,
348
- ret = inflateInit2(&strm, -12);
468
+ .parent = TYPE_EVENT_LOOP_BASE,
349
- if (ret != Z_OK) {
469
.class_init = iothread_class_init,
350
- return -1;
470
.instance_size = sizeof(IOThread),
351
- }
471
.instance_init = iothread_instance_init,
352
-
472
.instance_finalize = iothread_instance_finalize,
353
- ret = inflate(&strm, Z_FINISH);
473
- .interfaces = (InterfaceInfo[]) {
354
- if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) {
474
- {TYPE_USER_CREATABLE},
355
- /* We approve Z_BUF_ERROR because we need @dest buffer to be filled, but
475
- {}
356
- * @src buffer may be processed partly (because in qcow2 we know size of
476
- },
357
- * compressed data with precision of one sector) */
477
};
358
- ret = -1;
478
359
- }
479
static void iothread_register_types(void)
360
-
480
@@ -XXX,XX +XXX,XX @@ static int query_one_iothread(Object *object, void *opaque)
361
- inflateEnd(&strm);
481
info->poll_max_ns = iothread->poll_max_ns;
362
-
482
info->poll_grow = iothread->poll_grow;
363
- return ret;
483
info->poll_shrink = iothread->poll_shrink;
364
-}
484
- info->aio_max_batch = iothread->aio_max_batch;
365
-
485
+ info->aio_max_batch = iothread->parent_obj.aio_max_batch;
366
-#define MAX_COMPRESS_THREADS 4
486
367
-
487
QAPI_LIST_APPEND(*tail, info);
368
-typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size,
488
return 0;
369
- const void *src, size_t src_size);
370
-typedef struct Qcow2CompressData {
371
- void *dest;
372
- size_t dest_size;
373
- const void *src;
374
- size_t src_size;
375
- ssize_t ret;
376
-
377
- Qcow2CompressFunc func;
378
-} Qcow2CompressData;
379
-
380
-static int qcow2_compress_pool_func(void *opaque)
381
-{
382
- Qcow2CompressData *data = opaque;
383
-
384
- data->ret = data->func(data->dest, data->dest_size,
385
- data->src, data->src_size);
386
-
387
- return 0;
388
-}
389
-
390
-static void qcow2_compress_complete(void *opaque, int ret)
391
-{
392
- qemu_coroutine_enter(opaque);
393
-}
394
-
395
-static ssize_t coroutine_fn
396
-qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
397
- const void *src, size_t src_size, Qcow2CompressFunc func)
398
-{
399
- BDRVQcow2State *s = bs->opaque;
400
- BlockAIOCB *acb;
401
- ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
402
- Qcow2CompressData arg = {
403
- .dest = dest,
404
- .dest_size = dest_size,
405
- .src = src,
406
- .src_size = src_size,
407
- .func = func,
408
- };
409
-
410
- while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) {
411
- qemu_co_queue_wait(&s->compress_wait_queue, NULL);
412
- }
413
-
414
- s->nb_compress_threads++;
415
- acb = thread_pool_submit_aio(pool, qcow2_compress_pool_func, &arg,
416
- qcow2_compress_complete,
417
- qemu_coroutine_self());
418
-
419
- if (!acb) {
420
- s->nb_compress_threads--;
421
- return -EINVAL;
422
- }
423
- qemu_coroutine_yield();
424
- s->nb_compress_threads--;
425
- qemu_co_queue_next(&s->compress_wait_queue);
426
-
427
- return arg.ret;
428
-}
429
-
430
-static ssize_t coroutine_fn
431
-qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size,
432
- const void *src, size_t src_size)
433
-{
434
- return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
435
- qcow2_compress);
436
-}
437
-
438
-static ssize_t coroutine_fn
439
-qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
440
- const void *src, size_t src_size)
441
-{
442
- return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
443
- qcow2_decompress);
444
-}
445
-
446
/* XXX: put compressed sectors first, then all the cluster aligned
447
tables to avoid losing bytes in alignment */
448
static coroutine_fn int
449
--
489
--
450
2.21.0
490
2.35.1
451
452
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Use thread_pool_submit_co, instead of reinventing it here. Note, that
4
thread_pool_submit_aio() never returns NULL, so checking it was an
5
extra thing.
6
7
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Reviewed-by: Alberto Garcia <berto@igalia.com>
9
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
Message-id: 20190506142741.41731-4-vsementsov@virtuozzo.com
11
Signed-off-by: Max Reitz <mreitz@redhat.com>
12
---
13
block/qcow2-threads.c | 17 ++---------------
14
1 file changed, 2 insertions(+), 15 deletions(-)
15
16
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/qcow2-threads.c
19
+++ b/block/qcow2-threads.c
20
@@ -XXX,XX +XXX,XX @@ static int qcow2_compress_pool_func(void *opaque)
21
return 0;
22
}
23
24
-static void qcow2_compress_complete(void *opaque, int ret)
25
-{
26
- qemu_coroutine_enter(opaque);
27
-}
28
-
29
static ssize_t coroutine_fn
30
qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
31
const void *src, size_t src_size, Qcow2CompressFunc func)
32
{
33
BDRVQcow2State *s = bs->opaque;
34
- BlockAIOCB *acb;
35
ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
36
Qcow2CompressData arg = {
37
.dest = dest,
38
@@ -XXX,XX +XXX,XX @@ qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
39
}
40
41
s->nb_compress_threads++;
42
- acb = thread_pool_submit_aio(pool, qcow2_compress_pool_func, &arg,
43
- qcow2_compress_complete,
44
- qemu_coroutine_self());
45
-
46
- if (!acb) {
47
- s->nb_compress_threads--;
48
- return -EINVAL;
49
- }
50
- qemu_coroutine_yield();
51
+ thread_pool_submit_co(pool, qcow2_compress_pool_func, &arg);
52
s->nb_compress_threads--;
53
+
54
qemu_co_queue_next(&s->compress_wait_queue);
55
56
return arg.ret;
57
--
58
2.21.0
59
60
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Drop dependence on AioContext lock.
4
5
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
6
Reviewed-by: Alberto Garcia <berto@igalia.com>
7
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20190506142741.41731-5-vsementsov@virtuozzo.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/qcow2-threads.c | 10 +++++++---
13
1 file changed, 7 insertions(+), 3 deletions(-)
14
15
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/qcow2-threads.c
18
+++ b/block/qcow2-threads.c
19
@@ -XXX,XX +XXX,XX @@ qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
20
.func = func,
21
};
22
23
+ qemu_co_mutex_lock(&s->lock);
24
while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) {
25
- qemu_co_queue_wait(&s->compress_wait_queue, NULL);
26
+ qemu_co_queue_wait(&s->compress_wait_queue, &s->lock);
27
}
28
-
29
s->nb_compress_threads++;
30
+ qemu_co_mutex_unlock(&s->lock);
31
+
32
thread_pool_submit_co(pool, qcow2_compress_pool_func, &arg);
33
- s->nb_compress_threads--;
34
35
+ qemu_co_mutex_lock(&s->lock);
36
+ s->nb_compress_threads--;
37
qemu_co_queue_next(&s->compress_wait_queue);
38
+ qemu_co_mutex_unlock(&s->lock);
39
40
return arg.ret;
41
}
42
--
43
2.21.0
44
45
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Move generic part out of qcow2_co_do_compress, to reuse it for
4
encryption and rename things that would be shared with encryption path.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Alberto Garcia <berto@igalia.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20190506142741.41731-6-vsementsov@virtuozzo.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/qcow2.h | 4 ++--
13
block/qcow2-threads.c | 47 ++++++++++++++++++++++++++++---------------
14
block/qcow2.c | 2 +-
15
3 files changed, 34 insertions(+), 19 deletions(-)
16
17
diff --git a/block/qcow2.h b/block/qcow2.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/qcow2.h
20
+++ b/block/qcow2.h
21
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVQcow2State {
22
char *image_backing_format;
23
char *image_data_file;
24
25
- CoQueue compress_wait_queue;
26
- int nb_compress_threads;
27
+ CoQueue thread_task_queue;
28
+ int nb_threads;
29
30
BdrvChild *data_file;
31
} BDRVQcow2State;
32
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/block/qcow2-threads.c
35
+++ b/block/qcow2-threads.c
36
@@ -XXX,XX +XXX,XX @@
37
#include "qcow2.h"
38
#include "block/thread-pool.h"
39
40
-#define MAX_COMPRESS_THREADS 4
41
+#define QCOW2_MAX_THREADS 4
42
+
43
+static int coroutine_fn
44
+qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
45
+{
46
+ int ret;
47
+ BDRVQcow2State *s = bs->opaque;
48
+ ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
49
+
50
+ qemu_co_mutex_lock(&s->lock);
51
+ while (s->nb_threads >= QCOW2_MAX_THREADS) {
52
+ qemu_co_queue_wait(&s->thread_task_queue, &s->lock);
53
+ }
54
+ s->nb_threads++;
55
+ qemu_co_mutex_unlock(&s->lock);
56
+
57
+ ret = thread_pool_submit_co(pool, func, arg);
58
+
59
+ qemu_co_mutex_lock(&s->lock);
60
+ s->nb_threads--;
61
+ qemu_co_queue_next(&s->thread_task_queue);
62
+ qemu_co_mutex_unlock(&s->lock);
63
+
64
+ return ret;
65
+}
66
+
67
+
68
+/*
69
+ * Compression
70
+ */
71
72
typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size,
73
const void *src, size_t src_size);
74
@@ -XXX,XX +XXX,XX @@ static ssize_t coroutine_fn
75
qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
76
const void *src, size_t src_size, Qcow2CompressFunc func)
77
{
78
- BDRVQcow2State *s = bs->opaque;
79
- ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
80
Qcow2CompressData arg = {
81
.dest = dest,
82
.dest_size = dest_size,
83
@@ -XXX,XX +XXX,XX @@ qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size,
84
.func = func,
85
};
86
87
- qemu_co_mutex_lock(&s->lock);
88
- while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) {
89
- qemu_co_queue_wait(&s->compress_wait_queue, &s->lock);
90
- }
91
- s->nb_compress_threads++;
92
- qemu_co_mutex_unlock(&s->lock);
93
-
94
- thread_pool_submit_co(pool, qcow2_compress_pool_func, &arg);
95
-
96
- qemu_co_mutex_lock(&s->lock);
97
- s->nb_compress_threads--;
98
- qemu_co_queue_next(&s->compress_wait_queue);
99
- qemu_co_mutex_unlock(&s->lock);
100
+ qcow2_co_process(bs, qcow2_compress_pool_func, &arg);
101
102
return arg.ret;
103
}
104
diff --git a/block/qcow2.c b/block/qcow2.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/block/qcow2.c
107
+++ b/block/qcow2.c
108
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
109
}
110
#endif
111
112
- qemu_co_queue_init(&s->compress_wait_queue);
113
+ qemu_co_queue_init(&s->thread_task_queue);
114
115
return ret;
116
117
--
118
2.21.0
119
120
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Background: decryption will be done in threads, to take benefit of it,
4
we should move it out of the lock first.
5
6
But let's go further: it turns out, that only
7
qcow2_get_cluster_offset() needs locking, so reduce locking to it.
8
9
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10
Message-id: 20190506142741.41731-7-vsementsov@virtuozzo.com
11
Reviewed-by: Alberto Garcia <berto@igalia.com>
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
13
---
14
block/qcow2.c | 12 ++----------
15
1 file changed, 2 insertions(+), 10 deletions(-)
16
17
diff --git a/block/qcow2.c b/block/qcow2.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/qcow2.c
20
+++ b/block/qcow2.c
21
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
22
23
qemu_iovec_init(&hd_qiov, qiov->niov);
24
25
- qemu_co_mutex_lock(&s->lock);
26
-
27
while (bytes != 0) {
28
29
/* prepare next request */
30
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
31
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
32
}
33
34
+ qemu_co_mutex_lock(&s->lock);
35
ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset);
36
+ qemu_co_mutex_unlock(&s->lock);
37
if (ret < 0) {
38
goto fail;
39
}
40
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
41
42
if (bs->backing) {
43
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
44
- qemu_co_mutex_unlock(&s->lock);
45
ret = bdrv_co_preadv(bs->backing, offset, cur_bytes,
46
&hd_qiov, 0);
47
- qemu_co_mutex_lock(&s->lock);
48
if (ret < 0) {
49
goto fail;
50
}
51
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
52
break;
53
54
case QCOW2_CLUSTER_COMPRESSED:
55
- qemu_co_mutex_unlock(&s->lock);
56
ret = qcow2_co_preadv_compressed(bs, cluster_offset,
57
offset, cur_bytes,
58
&hd_qiov);
59
- qemu_co_mutex_lock(&s->lock);
60
if (ret < 0) {
61
goto fail;
62
}
63
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
64
}
65
66
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
67
- qemu_co_mutex_unlock(&s->lock);
68
ret = bdrv_co_preadv(s->data_file,
69
cluster_offset + offset_in_cluster,
70
cur_bytes, &hd_qiov, 0);
71
- qemu_co_mutex_lock(&s->lock);
72
if (ret < 0) {
73
goto fail;
74
}
75
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
76
ret = 0;
77
78
fail:
79
- qemu_co_mutex_unlock(&s->lock);
80
-
81
qemu_iovec_destroy(&hd_qiov);
82
qemu_vfree(cluster_data);
83
84
--
85
2.21.0
86
87
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Encryption will be done in threads, to take benefit of it, we should
4
move it out of the lock first.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Alberto Garcia <berto@igalia.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20190506142741.41731-8-vsementsov@virtuozzo.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/qcow2.c | 35 +++++++++++++++++++++--------------
13
1 file changed, 21 insertions(+), 14 deletions(-)
14
15
diff --git a/block/qcow2.c b/block/qcow2.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/qcow2.c
18
+++ b/block/qcow2.c
19
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
20
ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
21
&cluster_offset, &l2meta);
22
if (ret < 0) {
23
- goto fail;
24
+ goto out_locked;
25
}
26
27
assert((cluster_offset & 511) == 0);
28
29
+ ret = qcow2_pre_write_overlap_check(bs, 0,
30
+ cluster_offset + offset_in_cluster,
31
+ cur_bytes, true);
32
+ if (ret < 0) {
33
+ goto out_locked;
34
+ }
35
+
36
+ qemu_co_mutex_unlock(&s->lock);
37
+
38
qemu_iovec_reset(&hd_qiov);
39
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
40
41
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
42
* s->cluster_size);
43
if (cluster_data == NULL) {
44
ret = -ENOMEM;
45
- goto fail;
46
+ goto out_unlocked;
47
}
48
}
49
50
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
51
cluster_data,
52
cur_bytes, NULL) < 0) {
53
ret = -EIO;
54
- goto fail;
55
+ goto out_unlocked;
56
}
57
58
qemu_iovec_reset(&hd_qiov);
59
qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
60
}
61
62
- ret = qcow2_pre_write_overlap_check(bs, 0,
63
- cluster_offset + offset_in_cluster, cur_bytes, true);
64
- if (ret < 0) {
65
- goto fail;
66
- }
67
-
68
/* If we need to do COW, check if it's possible to merge the
69
* writing of the guest data together with that of the COW regions.
70
* If it's not possible (or not necessary) then write the
71
* guest data now. */
72
if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) {
73
- qemu_co_mutex_unlock(&s->lock);
74
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
75
trace_qcow2_writev_data(qemu_coroutine_self(),
76
cluster_offset + offset_in_cluster);
77
ret = bdrv_co_pwritev(s->data_file,
78
cluster_offset + offset_in_cluster,
79
cur_bytes, &hd_qiov, 0);
80
- qemu_co_mutex_lock(&s->lock);
81
if (ret < 0) {
82
- goto fail;
83
+ goto out_unlocked;
84
}
85
}
86
87
+ qemu_co_mutex_lock(&s->lock);
88
+
89
ret = qcow2_handle_l2meta(bs, &l2meta, true);
90
if (ret) {
91
- goto fail;
92
+ goto out_locked;
93
}
94
95
bytes -= cur_bytes;
96
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
97
trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
98
}
99
ret = 0;
100
+ goto out_locked;
101
102
-fail:
103
+out_unlocked:
104
+ qemu_co_mutex_lock(&s->lock);
105
+
106
+out_locked:
107
qcow2_handle_l2meta(bs, &l2meta, false);
108
109
qemu_co_mutex_unlock(&s->lock);
110
--
111
2.21.0
112
113
diff view generated by jsdifflib
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Do encryption/decryption in threads, like it is already done for
4
compression. This improves asynchronous encrypted io.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Alberto Garcia <berto@igalia.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20190506142741.41731-9-vsementsov@virtuozzo.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/qcow2.h | 8 ++++++
13
block/qcow2-cluster.c | 7 ++---
14
block/qcow2-threads.c | 65 +++++++++++++++++++++++++++++++++++++++++--
15
block/qcow2.c | 22 +++++----------
16
4 files changed, 81 insertions(+), 21 deletions(-)
17
18
diff --git a/block/qcow2.h b/block/qcow2.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/block/qcow2.h
21
+++ b/block/qcow2.h
22
@@ -XXX,XX +XXX,XX @@ typedef struct Qcow2BitmapHeaderExt {
23
uint64_t bitmap_directory_offset;
24
} QEMU_PACKED Qcow2BitmapHeaderExt;
25
26
+#define QCOW2_MAX_THREADS 4
27
+
28
typedef struct BDRVQcow2State {
29
int cluster_bits;
30
int cluster_size;
31
@@ -XXX,XX +XXX,XX @@ qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size,
32
ssize_t coroutine_fn
33
qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
34
const void *src, size_t src_size);
35
+int coroutine_fn
36
+qcow2_co_encrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
37
+ uint64_t offset, void *buf, size_t len);
38
+int coroutine_fn
39
+qcow2_co_decrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
40
+ uint64_t offset, void *buf, size_t len);
41
42
#endif
43
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/block/qcow2-cluster.c
46
+++ b/block/qcow2-cluster.c
47
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
48
{
49
if (bytes && bs->encrypted) {
50
BDRVQcow2State *s = bs->opaque;
51
- int64_t offset = (s->crypt_physical_offset ?
52
- (cluster_offset + offset_in_cluster) :
53
- (src_cluster_offset + offset_in_cluster));
54
assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
55
assert((bytes & ~BDRV_SECTOR_MASK) == 0);
56
assert(s->crypto);
57
- if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
58
+ if (qcow2_co_encrypt(bs, cluster_offset,
59
+ src_cluster_offset + offset_in_cluster,
60
+ buffer, bytes) < 0) {
61
return false;
62
}
63
}
64
diff --git a/block/qcow2-threads.c b/block/qcow2-threads.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/block/qcow2-threads.c
67
+++ b/block/qcow2-threads.c
68
@@ -XXX,XX +XXX,XX @@
69
70
#include "qcow2.h"
71
#include "block/thread-pool.h"
72
-
73
-#define QCOW2_MAX_THREADS 4
74
+#include "crypto.h"
75
76
static int coroutine_fn
77
qcow2_co_process(BlockDriverState *bs, ThreadPoolFunc *func, void *arg)
78
@@ -XXX,XX +XXX,XX @@ qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size,
79
return qcow2_co_do_compress(bs, dest, dest_size, src, src_size,
80
qcow2_decompress);
81
}
82
+
83
+
84
+/*
85
+ * Cryptography
86
+ */
87
+
88
+/*
89
+ * Qcow2EncDecFunc: common prototype of qcrypto_block_encrypt() and
90
+ * qcrypto_block_decrypt() functions.
91
+ */
92
+typedef int (*Qcow2EncDecFunc)(QCryptoBlock *block, uint64_t offset,
93
+ uint8_t *buf, size_t len, Error **errp);
94
+
95
+typedef struct Qcow2EncDecData {
96
+ QCryptoBlock *block;
97
+ uint64_t offset;
98
+ uint8_t *buf;
99
+ size_t len;
100
+
101
+ Qcow2EncDecFunc func;
102
+} Qcow2EncDecData;
103
+
104
+static int qcow2_encdec_pool_func(void *opaque)
105
+{
106
+ Qcow2EncDecData *data = opaque;
107
+
108
+ return data->func(data->block, data->offset, data->buf, data->len, NULL);
109
+}
110
+
111
+static int coroutine_fn
112
+qcow2_co_encdec(BlockDriverState *bs, uint64_t file_cluster_offset,
113
+ uint64_t offset, void *buf, size_t len, Qcow2EncDecFunc func)
114
+{
115
+ BDRVQcow2State *s = bs->opaque;
116
+ Qcow2EncDecData arg = {
117
+ .block = s->crypto,
118
+ .offset = s->crypt_physical_offset ?
119
+ file_cluster_offset + offset_into_cluster(s, offset) :
120
+ offset,
121
+ .buf = buf,
122
+ .len = len,
123
+ .func = func,
124
+ };
125
+
126
+ return qcow2_co_process(bs, qcow2_encdec_pool_func, &arg);
127
+}
128
+
129
+int coroutine_fn
130
+qcow2_co_encrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
131
+ uint64_t offset, void *buf, size_t len)
132
+{
133
+ return qcow2_co_encdec(bs, file_cluster_offset, offset, buf, len,
134
+ qcrypto_block_encrypt);
135
+}
136
+
137
+int coroutine_fn
138
+qcow2_co_decrypt(BlockDriverState *bs, uint64_t file_cluster_offset,
139
+ uint64_t offset, void *buf, size_t len)
140
+{
141
+ return qcow2_co_encdec(bs, file_cluster_offset, offset, buf, len,
142
+ qcrypto_block_decrypt);
143
+}
144
diff --git a/block/qcow2.c b/block/qcow2.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/block/qcow2.c
147
+++ b/block/qcow2.c
148
@@ -XXX,XX +XXX,XX @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
149
}
150
s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
151
qcow2_crypto_hdr_read_func,
152
- bs, cflags, 1, errp);
153
+ bs, cflags, QCOW2_MAX_THREADS, errp);
154
if (!s->crypto) {
155
return -EINVAL;
156
}
157
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
158
cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
159
}
160
s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
161
- NULL, NULL, cflags, 1, errp);
162
+ NULL, NULL, cflags,
163
+ QCOW2_MAX_THREADS, errp);
164
if (!s->crypto) {
165
ret = -EINVAL;
166
goto fail;
167
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
168
assert(s->crypto);
169
assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
170
assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
171
- if (qcrypto_block_decrypt(s->crypto,
172
- (s->crypt_physical_offset ?
173
- cluster_offset + offset_in_cluster :
174
- offset),
175
- cluster_data,
176
- cur_bytes,
177
- NULL) < 0) {
178
+ if (qcow2_co_decrypt(bs, cluster_offset, offset,
179
+ cluster_data, cur_bytes) < 0) {
180
ret = -EIO;
181
goto fail;
182
}
183
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
184
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
185
qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);
186
187
- if (qcrypto_block_encrypt(s->crypto,
188
- (s->crypt_physical_offset ?
189
- cluster_offset + offset_in_cluster :
190
- offset),
191
- cluster_data,
192
- cur_bytes, NULL) < 0) {
193
+ if (qcow2_co_encrypt(bs, cluster_offset, offset,
194
+ cluster_data, cur_bytes) < 0) {
195
ret = -EIO;
196
goto out_unlocked;
197
}
198
--
199
2.21.0
200
201
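The wrappers above follow a simple dispatch pattern: pack the parameters of a
blocking qcrypto call into a struct, hand that struct to the generic
qcow2_co_process() helper (which runs it through thread_pool_submit_co()), and
return the callback's result to the coroutine. The standalone sketch below
shows the same pattern in isolation; it is plain C, not QEMU code, and every
name in it is made up for illustration.

/*
 * Dispatch pattern: pack the arguments of a blocking callback into a
 * struct, run it through a generic "execute in a worker" helper, and
 * return its result.  The worker here runs synchronously for simplicity.
 */
#include <stdint.h>
#include <stdio.h>

typedef int (*EncDecFunc)(uint64_t offset, uint8_t *buf, size_t len);

typedef struct EncDecData {
    uint64_t offset;
    uint8_t *buf;
    size_t len;
    EncDecFunc func;
} EncDecData;

/* Stand-in for the thread-pool entry point: unpack the struct and call. */
static int encdec_pool_func(void *opaque)
{
    EncDecData *data = opaque;
    return data->func(data->offset, data->buf, data->len);
}

/* Stand-in for qcow2_co_process(); a real worker would run in a thread. */
static int run_in_worker(int (*func)(void *), void *arg)
{
    return func(arg);
}

/* Toy "cipher": XOR with a constant; encrypt and decrypt are the same op. */
static int xor_cipher(uint64_t offset, uint8_t *buf, size_t len)
{
    (void)offset;
    for (size_t i = 0; i < len; i++) {
        buf[i] ^= 0x5a;
    }
    return 0;
}

int main(void)
{
    uint8_t buf[8] = "secret";
    EncDecData arg = { .offset = 0, .buf = buf, .len = sizeof(buf),
                       .func = xor_cipher };

    run_in_worker(encdec_pool_func, &arg);   /* "encrypt" */
    run_in_worker(encdec_pool_func, &arg);   /* "decrypt" */
    printf("%s\n", buf);                     /* prints "secret" again */
    return 0;
}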
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Simplify backup_incremental_init_copy_bitmap using the function
4
bdrv_dirty_bitmap_next_dirty_area.
5
6
Note: use job->len instead of the bitmap size; it should not matter, but
7
it results in less code.
8
9
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10
Reviewed-by: Max Reitz <mreitz@redhat.com>
11
Message-id: 20190429090842.57910-2-vsementsov@virtuozzo.com
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
13
---
14
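As an aside on the arithmetic in the new loop: each dirty byte range reported
by bdrv_dirty_bitmap_next_dirty_area() is widened to the cluster boundaries
that contain it before the corresponding clusters are set in the copy bitmap.
A small standalone sketch of that rounding (plain C, no QEMU APIs; the values
are made up):

/*
 * A dirty byte range [offset, offset + bytes) is widened to the clusters
 * that contain it: cluster = offset / cluster_size and
 * end_cluster = DIV_ROUND_UP(offset + bytes, cluster_size).
 */
#include <inttypes.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    const uint64_t cluster_size = 64 * 1024;
    /* Pretend the dirty bitmap reported this area: */
    uint64_t offset = 100 * 1024;   /* starts inside cluster 1 */
    uint64_t bytes  = 200 * 1024;   /* ends inside cluster 4   */

    uint64_t cluster     = offset / cluster_size;
    uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, cluster_size);

    printf("dirty bytes [%" PRIu64 ", %" PRIu64 ") -> clusters [%" PRIu64
           ", %" PRIu64 ")\n", offset, offset + bytes, cluster, end_cluster);
    /* Output: clusters [1, 5), i.e. clusters 1..4 are marked for copying. */
    return 0;
}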
block/backup.c | 40 ++++++++++++----------------------------
15
1 file changed, 12 insertions(+), 28 deletions(-)
16
17
diff --git a/block/backup.c b/block/backup.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/backup.c
20
+++ b/block/backup.c
21
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
22
/* init copy_bitmap from sync_bitmap */
23
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
24
{
25
- BdrvDirtyBitmapIter *dbi;
26
- int64_t offset;
27
- int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
28
- job->cluster_size);
29
-
30
- dbi = bdrv_dirty_iter_new(job->sync_bitmap);
31
- while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
32
- int64_t cluster = offset / job->cluster_size;
33
- int64_t next_cluster;
34
-
35
- offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
36
- if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
37
- hbitmap_set(job->copy_bitmap, cluster, end - cluster);
38
- break;
39
- }
40
+ uint64_t offset = 0;
41
+ uint64_t bytes = job->len;
42
43
- offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset,
44
- UINT64_MAX);
45
- if (offset == -1) {
46
- hbitmap_set(job->copy_bitmap, cluster, end - cluster);
47
- break;
48
- }
49
+ while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
50
+ &offset, &bytes))
51
+ {
52
+ uint64_t cluster = offset / job->cluster_size;
53
+ uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, job->cluster_size);
54
55
- next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
56
- hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
57
- if (next_cluster >= end) {
58
+ hbitmap_set(job->copy_bitmap, cluster, end_cluster - cluster);
59
+
60
+ offset = end_cluster * job->cluster_size;
61
+ if (offset >= job->len) {
62
break;
63
}
64
-
65
- bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
66
+ bytes = job->len - offset;
67
}
68
69
/* TODO job_progress_set_remaining() would make more sense */
70
job_progress_update(&job->common.job,
71
job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
72
-
73
- bdrv_dirty_iter_free(dbi);
74
}
75
76
static int coroutine_fn backup_run(Job *job, Error **errp)
77
--
78
2.21.0
79
80
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
We are going to share this bitmap between backup and the backup-top filter
4
driver, so let's track something more meaningful: byte offsets (with
5
cluster granularity) instead of cluster indices. It also simplifies
some calculations.
6
7
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
Message-id: 20190429090842.57910-3-vsementsov@virtuozzo.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
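To see why hbitmap_set() and hbitmap_count() can now be fed byte values
directly, the toy model below tracks a byte range with one bit per
2^granularity bytes and reports the tracked amount back in bytes. It is
deliberately not QEMU's HBitmap, just a simplified stand-in for the
granularity idea:

/*
 * Toy byte-addressed bitmap with cluster granularity.  Setting any byte
 * inside a cluster marks the whole cluster; the count is returned in bytes.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define LEN          (1024 * 1024)      /* 1 MiB of virtual disk   */
#define GRANULARITY  16                 /* 2^16 = 64 KiB clusters  */
#define NBITS        (LEN >> GRANULARITY)

static bool bits[NBITS];

static void toy_set(uint64_t offset, uint64_t bytes)
{
    uint64_t end = (offset + bytes + (1u << GRANULARITY) - 1) >> GRANULARITY;

    for (uint64_t i = offset >> GRANULARITY; i < end; i++) {
        bits[i] = true;
    }
}

static uint64_t toy_count(void)         /* returns bytes, like the new code */
{
    uint64_t n = 0;

    for (unsigned i = 0; i < NBITS; i++) {
        n += bits[i] ? (1u << GRANULARITY) : 0;
    }
    return n;
}

int main(void)
{
    toy_set(100 * 1024, 8 * 1024);      /* 8 KiB dirty inside one cluster */
    printf("tracked bytes: %" PRIu64 "\n", toy_count());   /* 65536 */
    return 0;
}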
block/backup.c | 48 +++++++++++++++++++++++-------------------------
13
1 file changed, 23 insertions(+), 25 deletions(-)
14
15
diff --git a/block/backup.c b/block/backup.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/backup.c
18
+++ b/block/backup.c
19
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
20
int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
21
int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
22
23
- hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
24
+ assert(QEMU_IS_ALIGNED(start, job->cluster_size));
25
+ hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
26
nbytes = MIN(job->cluster_size, job->len - start);
27
if (!*bounce_buffer) {
28
*bounce_buffer = blk_blockalign(blk, job->cluster_size);
29
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
30
31
return nbytes;
32
fail:
33
- hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
34
+ hbitmap_set(job->copy_bitmap, start, job->cluster_size);
35
return ret;
36
37
}
38
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
39
int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
40
41
assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
42
+ assert(QEMU_IS_ALIGNED(start, job->cluster_size));
43
nbytes = MIN(job->copy_range_size, end - start);
44
nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
45
- hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
46
- nr_clusters);
47
+ hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
48
ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
49
read_flags, write_flags);
50
if (ret < 0) {
51
trace_backup_do_cow_copy_range_fail(job, start, ret);
52
- hbitmap_set(job->copy_bitmap, start / job->cluster_size,
53
- nr_clusters);
54
+ hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
55
return ret;
56
}
57
58
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
59
cow_request_begin(&cow_request, job, start, end);
60
61
while (start < end) {
62
- if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
63
+ if (!hbitmap_get(job->copy_bitmap, start)) {
64
trace_backup_do_cow_skip(job, start);
65
start += job->cluster_size;
66
continue; /* already copied */
67
@@ -XXX,XX +XXX,XX @@ static void backup_clean(Job *job)
68
assert(s->target);
69
blk_unref(s->target);
70
s->target = NULL;
71
+
72
+ if (s->copy_bitmap) {
73
+ hbitmap_free(s->copy_bitmap);
74
+ s->copy_bitmap = NULL;
75
+ }
76
}
77
78
void backup_do_checkpoint(BlockJob *job, Error **errp)
79
{
80
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
81
- int64_t len;
82
83
assert(block_job_driver(job) == &backup_job_driver);
84
85
@@ -XXX,XX +XXX,XX @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
86
return;
87
}
88
89
- len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
90
- hbitmap_set(backup_job->copy_bitmap, 0, len);
91
+ hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
92
}
93
94
static void backup_drain(BlockJob *job)
95
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
96
{
97
int ret;
98
bool error_is_read;
99
- int64_t cluster;
100
+ int64_t offset;
101
HBitmapIter hbi;
102
103
hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
104
- while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
105
+ while ((offset = hbitmap_iter_next(&hbi)) != -1) {
106
do {
107
if (yield_and_check(job)) {
108
return 0;
109
}
110
- ret = backup_do_cow(job, cluster * job->cluster_size,
111
+ ret = backup_do_cow(job, offset,
112
job->cluster_size, &error_is_read, false);
113
if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
114
BLOCK_ERROR_ACTION_REPORT)
115
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
116
while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
117
&offset, &bytes))
118
{
119
- uint64_t cluster = offset / job->cluster_size;
120
- uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, job->cluster_size);
121
+ hbitmap_set(job->copy_bitmap, offset, bytes);
122
123
- hbitmap_set(job->copy_bitmap, cluster, end_cluster - cluster);
124
-
125
- offset = end_cluster * job->cluster_size;
126
+ offset += bytes;
127
if (offset >= job->len) {
128
break;
129
}
130
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
131
132
/* TODO job_progress_set_remaining() would make more sense */
133
job_progress_update(&job->common.job,
134
- job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
135
+ job->len - hbitmap_count(job->copy_bitmap));
136
}
137
138
static int coroutine_fn backup_run(Job *job, Error **errp)
139
{
140
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
141
BlockDriverState *bs = blk_bs(s->common.blk);
142
- int64_t offset, nb_clusters;
143
+ int64_t offset;
144
int ret = 0;
145
146
QLIST_INIT(&s->inflight_reqs);
147
qemu_co_rwlock_init(&s->flush_rwlock);
148
149
- nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
150
job_progress_set_remaining(job, s->len);
151
152
- s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
153
if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
154
backup_incremental_init_copy_bitmap(s);
155
} else {
156
- hbitmap_set(s->copy_bitmap, 0, nb_clusters);
157
+ hbitmap_set(s->copy_bitmap, 0, s->len);
158
}
159
160
-
161
s->before_write.notify = backup_before_write_notify;
162
bdrv_add_before_write_notifier(bs, &s->before_write);
163
164
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *job, Error **errp)
165
/* wait until pending backup_do_cow() calls have completed */
166
qemu_co_rwlock_wrlock(&s->flush_rwlock);
167
qemu_co_rwlock_unlock(&s->flush_rwlock);
168
- hbitmap_free(s->copy_bitmap);
169
170
return ret;
171
}
172
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
173
} else {
174
job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
175
}
176
+
177
+ job->copy_bitmap = hbitmap_alloc(len, ctz32(job->cluster_size));
178
job->use_copy_range = true;
179
job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
180
blk_get_max_transfer(job->target));
181
--
182
2.21.0
183
184
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Split allocation checking into a separate function and reduce nesting.
4
Treat a bdrv_is_allocated() failure as an allocated area: copying more
5
than needed is not wrong (and we do it anyway), and it seems better than
6
failing the whole job. Most probably we would fail on the next read
7
anyway if there is a real problem with the source.
8
9
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
10
Reviewed-by: Max Reitz <mreitz@redhat.com>
11
Message-id: 20190429090842.57910-4-vsementsov@virtuozzo.com
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
13
---
14
block/backup.c | 60 +++++++++++++++++++-------------------------------
15
1 file changed, 23 insertions(+), 37 deletions(-)
16
17
diff --git a/block/backup.c b/block/backup.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/block/backup.c
20
+++ b/block/backup.c
21
@@ -XXX,XX +XXX,XX @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
22
return false;
23
}
24
25
+static bool bdrv_is_unallocated_range(BlockDriverState *bs,
26
+ int64_t offset, int64_t bytes)
27
+{
28
+ int64_t end = offset + bytes;
29
+
30
+ while (offset < end && !bdrv_is_allocated(bs, offset, bytes, &bytes)) {
31
+ if (bytes == 0) {
32
+ return true;
33
+ }
34
+ offset += bytes;
35
+ bytes = end - offset;
36
+ }
37
+
38
+ return offset >= end;
39
+}
40
+
41
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
42
{
43
int ret;
44
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *job, Error **errp)
45
for (offset = 0; offset < s->len;
46
offset += s->cluster_size) {
47
bool error_is_read;
48
- int alloced = 0;
49
50
if (yield_and_check(s)) {
51
break;
52
}
53
54
- if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
55
- int i;
56
- int64_t n;
57
-
58
- /* Check to see if these blocks are already in the
59
- * backing file. */
60
-
61
- for (i = 0; i < s->cluster_size;) {
62
- /* bdrv_is_allocated() only returns true/false based
63
- * on the first set of sectors it comes across that
64
- * are are all in the same state.
65
- * For that reason we must verify each sector in the
66
- * backup cluster length. We end up copying more than
67
- * needed but at some point that is always the case. */
68
- alloced =
69
- bdrv_is_allocated(bs, offset + i,
70
- s->cluster_size - i, &n);
71
- i += n;
72
-
73
- if (alloced || n == 0) {
74
- break;
75
- }
76
- }
77
-
78
- /* If the above loop never found any sectors that are in
79
- * the topmost image, skip this backup. */
80
- if (alloced == 0) {
81
- continue;
82
- }
83
- }
84
- /* FULL sync mode we copy the whole drive. */
85
- if (alloced < 0) {
86
- ret = alloced;
87
- } else {
88
- ret = backup_do_cow(s, offset, s->cluster_size,
89
- &error_is_read, false);
90
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP &&
91
+ bdrv_is_unallocated_range(bs, offset, s->cluster_size))
92
+ {
93
+ continue;
94
}
95
+
96
+ ret = backup_do_cow(s, offset, s->cluster_size,
97
+ &error_is_read, false);
98
if (ret < 0) {
99
/* Depending on error action, fail now or retry cluster */
100
BlockErrorAction action =
101
--
102
2.21.0
103
104
Deleted patch
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
1
3
Do full, top, and incremental mode copying all in one place. This
5
unifies the code path and makes further improvements easier.
5
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
7
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
Message-id: 20190429090842.57910-5-vsementsov@virtuozzo.com
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
block/backup.c | 43 ++++++++++---------------------------------
12
1 file changed, 10 insertions(+), 33 deletions(-)
13
14
diff --git a/block/backup.c b/block/backup.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/block/backup.c
17
+++ b/block/backup.c
18
@@ -XXX,XX +XXX,XX @@ static bool bdrv_is_unallocated_range(BlockDriverState *bs,
19
return offset >= end;
20
}
21
22
-static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
23
+static int coroutine_fn backup_loop(BackupBlockJob *job)
24
{
25
int ret;
26
bool error_is_read;
27
int64_t offset;
28
HBitmapIter hbi;
29
+ BlockDriverState *bs = blk_bs(job->common.blk);
30
31
hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
32
while ((offset = hbitmap_iter_next(&hbi)) != -1) {
33
+ if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
34
+ bdrv_is_unallocated_range(bs, offset, job->cluster_size))
35
+ {
36
+ hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
37
+ continue;
38
+ }
39
+
40
do {
41
if (yield_and_check(job)) {
42
return 0;
43
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *job, Error **errp)
44
{
45
BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
46
BlockDriverState *bs = blk_bs(s->common.blk);
47
- int64_t offset;
48
int ret = 0;
49
50
QLIST_INIT(&s->inflight_reqs);
51
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *job, Error **errp)
52
* notify callback service CoW requests. */
53
job_yield(job);
54
}
55
- } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
56
- ret = backup_run_incremental(s);
57
} else {
58
- /* Both FULL and TOP SYNC_MODE's require copying.. */
59
- for (offset = 0; offset < s->len;
60
- offset += s->cluster_size) {
61
- bool error_is_read;
62
-
63
- if (yield_and_check(s)) {
64
- break;
65
- }
66
-
67
- if (s->sync_mode == MIRROR_SYNC_MODE_TOP &&
68
- bdrv_is_unallocated_range(bs, offset, s->cluster_size))
69
- {
70
- continue;
71
- }
72
-
73
- ret = backup_do_cow(s, offset, s->cluster_size,
74
- &error_is_read, false);
75
- if (ret < 0) {
76
- /* Depending on error action, fail now or retry cluster */
77
- BlockErrorAction action =
78
- backup_error_action(s, error_is_read, -ret);
79
- if (action == BLOCK_ERROR_ACTION_REPORT) {
80
- break;
81
- } else {
82
- offset -= s->cluster_size;
83
- continue;
84
- }
85
- }
86
- }
87
+ ret = backup_loop(s);
88
}
89
90
notifier_with_return_remove(&s->before_write);
91
--
92
2.21.0
93
94
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
1
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>
2
2
3
Split out cluster_size calculation. Move copy-bitmap creation above
3
'event-loop-base' provides basic property handling for all 'AioContext'
4
block-job creation, as we are going to share it with the upcoming
4
based event loops. So let's define a new 'MainLoopClass' that inherits
5
backup-top filter, which should also be created before the actual block job
5
from it. This will permit tweaking the main loop's properties through
6
creation.
6
qapi as well as through the command line using the '-object' keyword[1].
7
7
Only one instance of 'MainLoopClass' may be created at any time.
8
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
9
Message-id: 20190429090842.57910-6-vsementsov@virtuozzo.com
9
'EventLoopBaseClass' learns a new callback, 'can_be_deleted()' so as to
10
[mreitz: Dropped a paragraph from the commit message that was left over
10
mark 'MainLoop' as non-deletable.
11
from a previous version]
11
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
12
[1] For example:
13
-object main-loop,id=main-loop,aio-max-batch=<value>
14
15
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
16
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
17
Acked-by: Markus Armbruster <armbru@redhat.com>
18
Message-id: 20220425075723.20019-3-nsaenzju@redhat.com
19
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
---
20
---
14
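The new can_be_deleted() hook follows the usual "optional class callback with
a safe default" pattern: the base class forwards the query when the subclass
implements the hook and otherwise allows deletion. A simplified, QOM-free
sketch (every type below is invented for illustration):

/*
 * Optional class hook with a default: subclasses that do not care stay
 * deletable, while main-loop-like objects can veto their own deletion.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct LoopClass {
    const char *name;
    bool (*can_be_deleted)(void);       /* optional hook */
} LoopClass;

static bool base_can_be_deleted(const LoopClass *klass)
{
    if (klass->can_be_deleted) {
        return klass->can_be_deleted();
    }
    return true;                        /* default: deletion is allowed */
}

static bool main_loop_can_be_deleted(void)
{
    return false;                       /* the main loop must never go away */
}

int main(void)
{
    LoopClass iothread  = { "iothread",  NULL };
    LoopClass main_loop = { "main-loop", main_loop_can_be_deleted };

    printf("%s deletable: %d\n", iothread.name,  base_can_be_deleted(&iothread));
    printf("%s deletable: %d\n", main_loop.name, base_can_be_deleted(&main_loop));
    return 0;
}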
block/backup.c | 82 ++++++++++++++++++++++++++++++++------------------
21
qapi/qom.json | 13 ++++++++
15
1 file changed, 52 insertions(+), 30 deletions(-)
22
meson.build | 3 +-
16
23
include/qemu/main-loop.h | 10 ++++++
17
diff --git a/block/backup.c b/block/backup.c
24
include/sysemu/event-loop-base.h | 1 +
18
index XXXXXXX..XXXXXXX 100644
25
event-loop-base.c | 13 ++++++++
19
--- a/block/backup.c
26
util/main-loop.c | 56 ++++++++++++++++++++++++++++++++
20
+++ b/block/backup.c
27
6 files changed, 95 insertions(+), 1 deletion(-)
21
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver backup_job_driver = {
28
22
.drain = backup_drain,
29
diff --git a/qapi/qom.json b/qapi/qom.json
30
index XXXXXXX..XXXXXXX 100644
31
--- a/qapi/qom.json
32
+++ b/qapi/qom.json
33
@@ -XXX,XX +XXX,XX @@
34
'*poll-grow': 'int',
35
'*poll-shrink': 'int' } }
36
37
+##
38
+# @MainLoopProperties:
39
+#
40
+# Properties for the main-loop object.
41
+#
42
+# Since: 7.1
43
+##
44
+{ 'struct': 'MainLoopProperties',
45
+ 'base': 'EventLoopBaseProperties',
46
+ 'data': {} }
47
+
48
##
49
# @MemoryBackendProperties:
50
#
51
@@ -XXX,XX +XXX,XX @@
52
{ 'name': 'input-linux',
53
'if': 'CONFIG_LINUX' },
54
'iothread',
55
+ 'main-loop',
56
{ 'name': 'memory-backend-epc',
57
'if': 'CONFIG_LINUX' },
58
'memory-backend-file',
59
@@ -XXX,XX +XXX,XX @@
60
'input-linux': { 'type': 'InputLinuxProperties',
61
'if': 'CONFIG_LINUX' },
62
'iothread': 'IothreadProperties',
63
+ 'main-loop': 'MainLoopProperties',
64
'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties',
65
'if': 'CONFIG_LINUX' },
66
'memory-backend-file': 'MemoryBackendFileProperties',
67
diff --git a/meson.build b/meson.build
68
index XXXXXXX..XXXXXXX 100644
69
--- a/meson.build
70
+++ b/meson.build
71
@@ -XXX,XX +XXX,XX @@ libqemuutil = static_library('qemuutil',
72
sources: util_ss.sources() + stub_ss.sources() + genh,
73
dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, pixman])
74
qemuutil = declare_dependency(link_with: libqemuutil,
75
- sources: genh + version_res)
76
+ sources: genh + version_res,
77
+ dependencies: [event_loop_base])
78
79
if have_system or have_user
80
decodetree = generator(find_program('scripts/decodetree.py'),
81
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
82
index XXXXXXX..XXXXXXX 100644
83
--- a/include/qemu/main-loop.h
84
+++ b/include/qemu/main-loop.h
85
@@ -XXX,XX +XXX,XX @@
86
#define QEMU_MAIN_LOOP_H
87
88
#include "block/aio.h"
89
+#include "qom/object.h"
90
+#include "sysemu/event-loop-base.h"
91
92
#define SIG_IPI SIGUSR1
93
94
+#define TYPE_MAIN_LOOP "main-loop"
95
+OBJECT_DECLARE_TYPE(MainLoop, MainLoopClass, MAIN_LOOP)
96
+
97
+struct MainLoop {
98
+ EventLoopBase parent_obj;
99
+};
100
+typedef struct MainLoop MainLoop;
101
+
102
/**
103
* qemu_init_main_loop: Set up the process so that it can run the main loop.
104
*
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/sysemu/event-loop-base.h
108
+++ b/include/sysemu/event-loop-base.h
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBaseClass {
110
111
void (*init)(EventLoopBase *base, Error **errp);
112
void (*update_params)(EventLoopBase *base, Error **errp);
113
+ bool (*can_be_deleted)(EventLoopBase *base);
23
};
114
};
24
115
25
+static int64_t backup_calculate_cluster_size(BlockDriverState *target,
116
struct EventLoopBase {
26
+ Error **errp)
117
diff --git a/event-loop-base.c b/event-loop-base.c
27
+{
118
index XXXXXXX..XXXXXXX 100644
28
+ int ret;
119
--- a/event-loop-base.c
29
+ BlockDriverInfo bdi;
120
+++ b/event-loop-base.c
30
+
121
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_complete(UserCreatable *uc, Error **errp)
31
+ /*
122
}
32
+ * If there is no backing file on the target, we cannot rely on COW if our
123
}
33
+ * backup cluster size is smaller than the target cluster size. Even for
124
34
+ * targets with a backing file, try to avoid COW if possible.
125
+static bool event_loop_base_can_be_deleted(UserCreatable *uc)
35
+ */
126
+{
36
+ ret = bdrv_get_info(target, &bdi);
127
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_GET_CLASS(uc);
37
+ if (ret == -ENOTSUP && !target->backing) {
128
+ EventLoopBase *backend = EVENT_LOOP_BASE(uc);
38
+ /* Cluster size is not defined */
129
+
39
+ warn_report("The target block device doesn't provide "
130
+ if (bc->can_be_deleted) {
40
+ "information about the block size and it doesn't have a "
131
+ return bc->can_be_deleted(backend);
41
+ "backing file. The default block size of %u bytes is "
42
+ "used. If the actual block size of the target exceeds "
43
+ "this default, the backup may be unusable",
44
+ BACKUP_CLUSTER_SIZE_DEFAULT);
45
+ return BACKUP_CLUSTER_SIZE_DEFAULT;
46
+ } else if (ret < 0 && !target->backing) {
47
+ error_setg_errno(errp, -ret,
48
+ "Couldn't determine the cluster size of the target image, "
49
+ "which has no backing file");
50
+ error_append_hint(errp,
51
+ "Aborting, since this may create an unusable destination image\n");
52
+ return ret;
53
+ } else if (ret < 0 && target->backing) {
54
+ /* Not fatal; just trudge on ahead. */
55
+ return BACKUP_CLUSTER_SIZE_DEFAULT;
56
+ }
132
+ }
57
+
133
+
58
+ return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
134
+ return true;
59
+}
135
+}
60
+
136
+
61
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
137
static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
62
BlockDriverState *target, int64_t speed,
63
MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
64
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
65
JobTxn *txn, Error **errp)
66
{
138
{
67
int64_t len;
139
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
68
- BlockDriverInfo bdi;
140
ucc->complete = event_loop_base_complete;
69
BackupBlockJob *job = NULL;
141
+ ucc->can_be_deleted = event_loop_base_can_be_deleted;
70
int ret;
142
71
+ int64_t cluster_size;
143
object_class_property_add(klass, "aio-max-batch", "int",
72
+ HBitmap *copy_bitmap = NULL;
144
event_loop_base_get_param,
73
145
diff --git a/util/main-loop.c b/util/main-loop.c
74
assert(bs);
146
index XXXXXXX..XXXXXXX 100644
75
assert(target);
147
--- a/util/main-loop.c
76
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
148
+++ b/util/main-loop.c
77
goto error;
149
@@ -XXX,XX +XXX,XX @@
78
}
150
#include "qemu/error-report.h"
79
151
#include "qemu/queue.h"
80
+ cluster_size = backup_calculate_cluster_size(target, errp);
152
#include "qemu/compiler.h"
81
+ if (cluster_size < 0) {
153
+#include "qom/object.h"
82
+ goto error;
154
155
#ifndef _WIN32
156
#include <sys/wait.h>
157
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
158
return 0;
159
}
160
161
+static void main_loop_update_params(EventLoopBase *base, Error **errp)
162
+{
163
+ if (!qemu_aio_context) {
164
+ error_setg(errp, "qemu aio context not ready");
165
+ return;
83
+ }
166
+ }
84
+
167
+
85
+ copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));
168
+ aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
86
+
169
+}
87
/* job->len is fixed, so we can't allow resize */
170
+
88
job = block_job_create(job_id, &backup_job_driver, txn, bs,
171
+MainLoop *mloop;
89
BLK_PERM_CONSISTENT_READ,
172
+
90
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
173
+static void main_loop_init(EventLoopBase *base, Error **errp)
91
174
+{
92
/* Detect image-fleecing (and similar) schemes */
175
+ MainLoop *m = MAIN_LOOP(base);
93
job->serialize_target_writes = bdrv_chain_contains(target, bs);
176
+
94
-
177
+ if (mloop) {
95
- /* If there is no backing file on the target, we cannot rely on COW if our
178
+ error_setg(errp, "only one main-loop instance allowed");
96
- * backup cluster size is smaller than the target cluster size. Even for
179
+ return;
97
- * targets with a backing file, try to avoid COW if possible. */
98
- ret = bdrv_get_info(target, &bdi);
99
- if (ret == -ENOTSUP && !target->backing) {
100
- /* Cluster size is not defined */
101
- warn_report("The target block device doesn't provide "
102
- "information about the block size and it doesn't have a "
103
- "backing file. The default block size of %u bytes is "
104
- "used. If the actual block size of the target exceeds "
105
- "this default, the backup may be unusable",
106
- BACKUP_CLUSTER_SIZE_DEFAULT);
107
- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
108
- } else if (ret < 0 && !target->backing) {
109
- error_setg_errno(errp, -ret,
110
- "Couldn't determine the cluster size of the target image, "
111
- "which has no backing file");
112
- error_append_hint(errp,
113
- "Aborting, since this may create an unusable destination image\n");
114
- goto error;
115
- } else if (ret < 0 && target->backing) {
116
- /* Not fatal; just trudge on ahead. */
117
- job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
118
- } else {
119
- job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
120
- }
121
-
122
- job->copy_bitmap = hbitmap_alloc(len, ctz32(job->cluster_size));
123
+ job->cluster_size = cluster_size;
124
+ job->copy_bitmap = copy_bitmap;
125
+ copy_bitmap = NULL;
126
job->use_copy_range = true;
127
job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
128
blk_get_max_transfer(job->target));
129
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
130
return &job->common;
131
132
error:
133
+ if (copy_bitmap) {
134
+ assert(!job || !job->copy_bitmap);
135
+ hbitmap_free(copy_bitmap);
136
+ }
180
+ }
137
if (sync_bitmap) {
181
+
138
bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
182
+ main_loop_update_params(base, errp);
139
}
183
+
184
+ mloop = m;
185
+ return;
186
+}
187
+
188
+static bool main_loop_can_be_deleted(EventLoopBase *base)
189
+{
190
+ return false;
191
+}
192
+
193
+static void main_loop_class_init(ObjectClass *oc, void *class_data)
194
+{
195
+ EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc);
196
+
197
+ bc->init = main_loop_init;
198
+ bc->update_params = main_loop_update_params;
199
+ bc->can_be_deleted = main_loop_can_be_deleted;
200
+}
201
+
202
+static const TypeInfo main_loop_info = {
203
+ .name = TYPE_MAIN_LOOP,
204
+ .parent = TYPE_EVENT_LOOP_BASE,
205
+ .class_init = main_loop_class_init,
206
+ .instance_size = sizeof(MainLoop),
207
+};
208
+
209
+static void main_loop_register_types(void)
210
+{
211
+ type_register_static(&main_loop_info);
212
+}
213
+
214
+type_init(main_loop_register_types)
215
+
216
static int max_priority;
217
218
#ifndef _WIN32
140
--
219
--
141
2.21.0
220
2.35.1
142
143
Deleted patch
1
From: Alberto Garcia <berto@igalia.com>
2
1
3
bdrv_unref_child() does the following things:
4
5
- Updates the child->bs->inherits_from pointer.
6
- Calls bdrv_detach_child() to remove the BdrvChild from bs->children.
7
- Calls bdrv_unref() to unref the child BlockDriverState.
8
9
When bdrv_unref_child() was introduced in commit 33a604075c it was not
10
used in bdrv_close() because the drivers that had additional children
11
(like quorum or blkverify) had already called bdrv_unref() on their
12
children during their own close functions.
13
14
This was changed later (in 0bd6e91a7e for quorum, in 3e586be0b2 for
15
blkverify) so there's no reason not to use bdrv_unref_child() in
16
bdrv_close() anymore.
17
18
After this there's also no need to remove bs->backing and bs->file
19
separately from the rest of the children, so bdrv_close() can be
20
simplified.
21
22
Now bdrv_close() unrefs all children (before this patch it was only
23
bs->file and bs->backing). As a result, none of the callers of
24
bdrv_attach_child() should remove their reference to child_bs (because
25
this function effectively steals that reference). This patch updates a
26
couple of tests that were doing their own bdrv_unref().
27
28
Signed-off-by: Alberto Garcia <berto@igalia.com>
29
Message-id: 6d1d5feaa53aa1ab127adb73d605dc4503e3abd5.1557754872.git.berto@igalia.com
30
[mreitz: s/where/were/]
31
Signed-off-by: Max Reitz <mreitz@redhat.com>
32
---
33
block.c | 16 +++-------------
34
tests/test-bdrv-drain.c | 6 ------
35
tests/test-bdrv-graph-mod.c | 1 -
36
3 files changed, 3 insertions(+), 20 deletions(-)
37
38
diff --git a/block.c b/block.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/block.c
41
+++ b/block.c
42
@@ -XXX,XX +XXX,XX @@ static void bdrv_close(BlockDriverState *bs)
43
bs->drv = NULL;
44
}
45
46
- bdrv_set_backing_hd(bs, NULL, &error_abort);
47
-
48
- if (bs->file != NULL) {
49
- bdrv_unref_child(bs, bs->file);
50
- bs->file = NULL;
51
- }
52
-
53
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
54
- /* TODO Remove bdrv_unref() from drivers' close function and use
55
- * bdrv_unref_child() here */
56
- if (child->bs->inherits_from == bs) {
57
- child->bs->inherits_from = NULL;
58
- }
59
- bdrv_detach_child(child);
60
+ bdrv_unref_child(bs, child);
61
}
62
63
+ bs->backing = NULL;
64
+ bs->file = NULL;
65
g_free(bs->opaque);
66
bs->opaque = NULL;
67
atomic_set(&bs->copy_on_read, 0);
68
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tests/test-bdrv-drain.c
71
+++ b/tests/test-bdrv-drain.c
72
@@ -XXX,XX +XXX,XX @@ static void test_detach_indirect(bool by_parent_cb)
73
bdrv_unref(parent_b);
74
blk_unref(blk);
75
76
- /* XXX Once bdrv_close() unref's children instead of just detaching them,
77
- * this won't be necessary any more. */
78
- bdrv_unref(a);
79
- bdrv_unref(a);
80
- bdrv_unref(c);
81
-
82
g_assert_cmpint(a->refcnt, ==, 1);
83
g_assert_cmpint(b->refcnt, ==, 1);
84
g_assert_cmpint(c->refcnt, ==, 1);
85
diff --git a/tests/test-bdrv-graph-mod.c b/tests/test-bdrv-graph-mod.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/tests/test-bdrv-graph-mod.c
88
+++ b/tests/test-bdrv-graph-mod.c
89
@@ -XXX,XX +XXX,XX @@ static void test_update_perm_tree(void)
90
g_assert_nonnull(local_err);
91
error_free(local_err);
92
93
- bdrv_unref(bs);
94
blk_unref(root);
95
}
96
97
--
98
2.21.0
99
100
Deleted patch
1
From: Alberto Garcia <berto@igalia.com>
2
1
3
A consequence of the previous patch is that bdrv_attach_child()
4
transfers the reference to child_bs from the caller to parent_bs,
5
which will drop it on bdrv_close() or when someone calls
6
bdrv_unref_child().
7
8
But this only happens when bdrv_attach_child() succeeds. If it fails
9
then the caller is responsible for dropping the reference to child_bs.
10
11
This patch makes bdrv_attach_child() take the reference also when
12
there is an error, freeing the caller from having to do it.
13
14
A similar situation happens with bdrv_root_attach_child(), so the
15
changes on this patch affect both functions.
16
17
Signed-off-by: Alberto Garcia <berto@igalia.com>
18
Message-id: 20dfb3d9ccec559cdd1a9690146abad5d204a186.1557754872.git.berto@igalia.com
19
[mreitz: Removed now superfluous BdrvChild * variable in
20
bdrv_open_child()]
21
Signed-off-by: Max Reitz <mreitz@redhat.com>
22
---
23
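The calling convention described above, where the callee consumes the
caller's reference even on failure, is what removes the error-path unrefs
from the callers touched by this patch. A toy refcounting sketch of that
convention (not QEMU code; the Node type and attach() are stand-ins):

/*
 * attach() steals the reference to @child: on success the parent owns it,
 * on failure attach() drops it itself, so callers never unref on error.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
    int refcnt;
} Node;

static Node *node_new(void)
{
    Node *n = calloc(1, sizeof(*n));
    n->refcnt = 1;
    return n;
}

static void node_unref(Node *n)
{
    if (--n->refcnt == 0) {
        free(n);
        printf("child freed\n");
    }
}

static bool attach(Node *child, bool fail)
{
    if (fail) {
        node_unref(child);      /* error path releases the stolen reference */
        return false;
    }
    return true;                /* success: the parent keeps the reference */
}

int main(void)
{
    Node *child = node_new();

    if (!attach(child, true)) {
        /* No node_unref(child) here: attach() has already dropped it. */
        printf("attach failed, nothing left to clean up\n");
    }
    return 0;
}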
block.c | 30 ++++++++++++++++++------------
24
block/block-backend.c | 3 +--
25
block/quorum.c | 1 -
26
blockjob.c | 2 +-
27
4 files changed, 20 insertions(+), 16 deletions(-)
28
29
diff --git a/block.c b/block.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/block.c
32
+++ b/block.c
33
@@ -XXX,XX +XXX,XX @@ static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs)
34
}
35
}
36
37
+/*
38
+ * This function steals the reference to child_bs from the caller.
39
+ * That reference is later dropped by bdrv_root_unref_child().
40
+ *
41
+ * On failure NULL is returned, errp is set and the reference to
42
+ * child_bs is also dropped.
43
+ */
44
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
45
const char *child_name,
46
const BdrvChildRole *child_role,
47
@@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
48
ret = bdrv_check_update_perm(child_bs, NULL, perm, shared_perm, NULL, errp);
49
if (ret < 0) {
50
bdrv_abort_perm_update(child_bs);
51
+ bdrv_unref(child_bs);
52
return NULL;
53
}
54
55
@@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
56
return child;
57
}
58
59
+/*
60
+ * This function transfers the reference to child_bs from the caller
61
+ * to parent_bs. That reference is later dropped by parent_bs on
62
+ * bdrv_close() or if someone calls bdrv_unref_child().
63
+ *
64
+ * On failure NULL is returned, errp is set and the reference to
65
+ * child_bs is also dropped.
66
+ */
67
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
68
BlockDriverState *child_bs,
69
const char *child_name,
70
@@ -XXX,XX +XXX,XX @@ void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
71
/* If backing_hd was already part of bs's backing chain, and
72
* inherits_from pointed recursively to bs then let's update it to
73
* point directly to bs (else it will become NULL). */
74
- if (update_inherits_from) {
75
+ if (bs->backing && update_inherits_from) {
76
backing_hd->inherits_from = bs;
77
}
78
- if (!bs->backing) {
79
- bdrv_unref(backing_hd);
80
- }
81
82
out:
83
bdrv_refresh_limits(bs, NULL);
84
@@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_open_child(const char *filename,
85
const BdrvChildRole *child_role,
86
bool allow_none, Error **errp)
87
{
88
- BdrvChild *c;
89
BlockDriverState *bs;
90
91
bs = bdrv_open_child_bs(filename, options, bdref_key, parent, child_role,
92
@@ -XXX,XX +XXX,XX @@ BdrvChild *bdrv_open_child(const char *filename,
93
return NULL;
94
}
95
96
- c = bdrv_attach_child(parent, bs, bdref_key, child_role, errp);
97
- if (!c) {
98
- bdrv_unref(bs);
99
- return NULL;
100
- }
101
-
102
- return c;
103
+ return bdrv_attach_child(parent, bs, bdref_key, child_role, errp);
104
}
105
106
/* TODO Future callers may need to specify parent/child_role in order for
107
diff --git a/block/block-backend.c b/block/block-backend.c
108
index XXXXXXX..XXXXXXX 100644
109
--- a/block/block-backend.c
110
+++ b/block/block-backend.c
111
@@ -XXX,XX +XXX,XX @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
112
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
113
perm, BLK_PERM_ALL, blk, errp);
114
if (!blk->root) {
115
- bdrv_unref(bs);
116
blk_unref(blk);
117
return NULL;
118
}
119
@@ -XXX,XX +XXX,XX @@ void blk_remove_bs(BlockBackend *blk)
120
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
121
{
122
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
123
+ bdrv_ref(bs);
124
blk->root = bdrv_root_attach_child(bs, "root", &child_root,
125
blk->perm, blk->shared_perm, blk, errp);
126
if (blk->root == NULL) {
127
return -EPERM;
128
}
129
- bdrv_ref(bs);
130
131
notifier_list_notify(&blk->insert_bs_notifiers, blk);
132
if (tgm->throttle_state) {
133
diff --git a/block/quorum.c b/block/quorum.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/block/quorum.c
136
+++ b/block/quorum.c
137
@@ -XXX,XX +XXX,XX @@ static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
138
child = bdrv_attach_child(bs, child_bs, indexstr, &child_format, errp);
139
if (child == NULL) {
140
s->next_child_index--;
141
- bdrv_unref(child_bs);
142
goto out;
143
}
144
s->children = g_renew(BdrvChild *, s->children, s->num_children + 1);
145
diff --git a/blockjob.c b/blockjob.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/blockjob.c
148
+++ b/blockjob.c
149
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
150
{
151
BdrvChild *c;
152
153
+ bdrv_ref(bs);
154
c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
155
job, errp);
156
if (c == NULL) {
157
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
158
}
159
160
job->nodes = g_slist_prepend(job->nodes, c);
161
- bdrv_ref(bs);
162
bdrv_op_block_all(bs, job->blocker);
163
164
return 0;
165
--
166
2.21.0
167
168
Deleted patch
1
From: Sam Eiderman <shmuel.eiderman@oracle.com>
2
1
3
In safe mode we open the entire chain, including the parent backing
4
file of the rebased file.
5
Do not open a new BlockBackend for the parent backing file; this
6
avoids opening the rest of the chain twice, which for long chains
7
saves many "pricey" bdrv_open() calls.
8
9
Permissions for blk_new() were copied from blk_new_open() when
10
flags = 0.
11
12
Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
13
Reviewed-by: Eyal Moscovici <eyal.moscovici@oracle.com>
14
Signed-off-by: Sagi Amit <sagi.amit@oracle.com>
15
Co-developed-by: Sagi Amit <sagi.amit@oracle.com>
16
Signed-off-by: Sam Eiderman <shmuel.eiderman@oracle.com>
17
Message-id: 20190523163337.4497-2-shmuel.eiderman@oracle.com
18
Signed-off-by: Max Reitz <mreitz@redhat.com>
19
---
20
qemu-img.c | 29 +++++++++--------------------
21
1 file changed, 9 insertions(+), 20 deletions(-)
22
23
diff --git a/qemu-img.c b/qemu-img.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/qemu-img.c
26
+++ b/qemu-img.c
27
@@ -XXX,XX +XXX,XX @@ static int img_rebase(int argc, char **argv)
28
29
/* For safe rebasing we need to compare old and new backing file */
30
if (!unsafe) {
31
- char backing_name[PATH_MAX];
32
QDict *options = NULL;
33
+ BlockDriverState *base_bs = backing_bs(bs);
34
35
- if (bs->backing) {
36
- if (bs->backing_format[0] != '\0') {
37
- options = qdict_new();
38
- qdict_put_str(options, "driver", bs->backing_format);
39
- }
40
-
41
- if (force_share) {
42
- if (!options) {
43
- options = qdict_new();
44
- }
45
- qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
46
- }
47
- bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
48
- blk_old_backing = blk_new_open(backing_name, NULL,
49
- options, src_flags, &local_err);
50
- if (!blk_old_backing) {
51
+ if (base_bs) {
52
+ blk_old_backing = blk_new(BLK_PERM_CONSISTENT_READ,
53
+ BLK_PERM_ALL);
54
+ ret = blk_insert_bs(blk_old_backing, base_bs,
55
+ &local_err);
56
+ if (ret < 0) {
57
error_reportf_err(local_err,
58
- "Could not open old backing file '%s': ",
59
- backing_name);
60
- ret = -1;
61
+ "Could not reuse old backing file '%s': ",
62
+ base_bs->filename);
63
goto out;
64
}
65
} else {
66
--
67
2.21.0
68
69
Deleted patch
1
From: Sam Eiderman <shmuel.eiderman@oracle.com>
2
1
3
In the following case:
4
5
(base) A <- B <- C (tip)
6
7
when running:
8
9
qemu-img rebase -b A C
10
11
QEMU would read all sectors not allocated in the file being rebased (C)
12
and compare them to the new base image (A), regardless of whether they
13
were changed or even allocated anywhere along the chain between the new
14
base and the top image (B). This causes many unneeded reads when
15
rebasing an image which represents a small diff of a large disk, as it
16
would read most of the disk's sectors.
17
18
Instead, use bdrv_is_allocated_above() to reduce the number of
19
unnecessary reads.
20
21
Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
22
Signed-off-by: Sam Eiderman <shmuel.eiderman@oracle.com>
23
Signed-off-by: Eyal Moscovici <eyal.moscovici@oracle.com>
24
Message-id: 20190523163337.4497-3-shmuel.eiderman@oracle.com
25
Signed-off-by: Max Reitz <mreitz@redhat.com>
26
---
27
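The skip added here can be pictured as follows: if no layer between the
image's current backing file and the new base allocates a given cluster, that
cluster cannot differ from the new base, so the read-and-compare step is
pointless. A standalone sketch with allocated_between() as a made-up stand-in
for bdrv_is_allocated_above():

/*
 * Walk the image cluster by cluster and only read-and-compare the clusters
 * that some intermediate layer actually wrote.
 */
#include <inttypes.h>
#include <stdio.h>

/* 1 = some layer above the new base allocates the range, 0 = none, <0 = error */
static int allocated_between(int64_t offset, int64_t bytes, int64_t *pnum)
{
    *pnum = bytes;
    return offset >= 3 * 65536 && offset < 5 * 65536;  /* pretend clusters 3-4 */
}

int main(void)
{
    const int64_t cluster = 65536;

    for (int64_t offset = 0; offset < 8 * cluster; offset += cluster) {
        int64_t n = cluster;
        int ret = allocated_between(offset, cluster, &n);

        if (ret < 0) {
            fprintf(stderr, "error while reading image metadata\n");
            return 1;
        }
        if (!ret) {
            continue;               /* unchanged since the new base: skip */
        }
        printf("cluster at %" PRId64 ": read and compare\n", offset);
    }
    return 0;
}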
qemu-img.c | 25 ++++++++++++++++++++++++-
28
1 file changed, 24 insertions(+), 1 deletion(-)
29
30
diff --git a/qemu-img.c b/qemu-img.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/qemu-img.c
33
+++ b/qemu-img.c
34
@@ -XXX,XX +XXX,XX @@ static int img_rebase(int argc, char **argv)
35
BlockBackend *blk = NULL, *blk_old_backing = NULL, *blk_new_backing = NULL;
36
uint8_t *buf_old = NULL;
37
uint8_t *buf_new = NULL;
38
- BlockDriverState *bs = NULL;
39
+ BlockDriverState *bs = NULL, *prefix_chain_bs = NULL;
40
char *filename;
41
const char *fmt, *cache, *src_cache, *out_basefmt, *out_baseimg;
42
int c, flags, src_flags, ret;
43
@@ -XXX,XX +XXX,XX @@ static int img_rebase(int argc, char **argv)
44
goto out;
45
}
46
47
+ /*
48
+ * Find out whether we rebase an image on top of a previous image
49
+ * in its chain.
50
+ */
51
+ prefix_chain_bs = bdrv_find_backing_image(bs, out_real_path);
52
+
53
blk_new_backing = blk_new_open(out_real_path, NULL,
54
options, src_flags, &local_err);
55
g_free(out_real_path);
56
@@ -XXX,XX +XXX,XX @@ static int img_rebase(int argc, char **argv)
57
continue;
58
}
59
60
+ if (prefix_chain_bs) {
61
+ /*
62
+ * If cluster wasn't changed since prefix_chain, we don't need
63
+ * to take action
64
+ */
65
+ ret = bdrv_is_allocated_above(backing_bs(bs), prefix_chain_bs,
66
+ offset, n, &n);
67
+ if (ret < 0) {
68
+ error_report("error while reading image metadata: %s",
69
+ strerror(-ret));
70
+ goto out;
71
+ }
72
+ if (!ret) {
73
+ continue;
74
+ }
75
+ }
76
+
77
/*
78
* Read old and new backing file and take into consideration that
79
* backing files may be smaller than the COW image.
80
--
81
2.21.0
82
83
Deleted patch
1
From: Sam Eiderman <shmuel.eiderman@oracle.com>
2
1
3
If a chain was detected, don't open a new BlockBackend for the target
4
backing file, which would create a new BlockDriverState. Instead, create
5
an empty BlockBackend and attach the already open BlockDriverState.
6
7
Permissions for blk_new() were copied from blk_new_open() when
8
flags = 0.
9
10
Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
11
Reviewed-by: Eyal Moscovici <eyal.moscovici@oracle.com>
12
Signed-off-by: Sagi Amit <sagi.amit@oracle.com>
13
Co-developed-by: Sagi Amit <sagi.amit@oracle.com>
14
Signed-off-by: Sam Eiderman <shmuel.eiderman@oracle.com>
15
Message-id: 20190523163337.4497-4-shmuel.eiderman@oracle.com
16
Signed-off-by: Max Reitz <mreitz@redhat.com>
17
---
18
qemu-img.c | 33 +++++++++++++++++++++++----------
19
1 file changed, 23 insertions(+), 10 deletions(-)
20
21
diff --git a/qemu-img.c b/qemu-img.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/qemu-img.c
24
+++ b/qemu-img.c
25
@@ -XXX,XX +XXX,XX @@ static int img_rebase(int argc, char **argv)
26
* in its chain.
27
*/
28
prefix_chain_bs = bdrv_find_backing_image(bs, out_real_path);
29
-
30
- blk_new_backing = blk_new_open(out_real_path, NULL,
31
- options, src_flags, &local_err);
32
- g_free(out_real_path);
33
- if (!blk_new_backing) {
34
- error_reportf_err(local_err,
35
- "Could not open new backing file '%s': ",
36
- out_baseimg);
37
- ret = -1;
38
- goto out;
39
+ if (prefix_chain_bs) {
40
+ g_free(out_real_path);
41
+ blk_new_backing = blk_new(BLK_PERM_CONSISTENT_READ,
42
+ BLK_PERM_ALL);
43
+ ret = blk_insert_bs(blk_new_backing, prefix_chain_bs,
44
+ &local_err);
45
+ if (ret < 0) {
46
+ error_reportf_err(local_err,
47
+ "Could not reuse backing file '%s': ",
48
+ out_baseimg);
49
+ goto out;
50
+ }
51
+ } else {
52
+ blk_new_backing = blk_new_open(out_real_path, NULL,
53
+ options, src_flags, &local_err);
54
+ g_free(out_real_path);
55
+ if (!blk_new_backing) {
56
+ error_reportf_err(local_err,
57
+ "Could not open new backing file '%s': ",
58
+ out_baseimg);
59
+ ret = -1;
60
+ goto out;
61
+ }
62
}
63
}
64
}
65
--
66
2.21.0
67
68
1
From: Anton Nefedov <anton.nefedov@virtuozzo.com>
1
From: Nicolas Saenz Julienne <nsaenzju@redhat.com>
2
2
3
If COW areas of the newly allocated clusters are zeroes on the backing
3
The thread pool regulates itself: when idle, it kills threads until
4
image, efficient bdrv_write_zeroes(flags=BDRV_REQ_NO_FALLBACK) can be
4
empty, when in demand, it creates new threads until full. This behaviour
5
used on the whole cluster instead of writing explicit zero buffers later
5
doesn't play well with latency sensitive workloads where the price of
6
in perform_cow().
6
creating a new thread is too high. For example, when paired with qemu's
7
'-mlock', or using safety features like SafeStack, creating a new thread
8
has been measured take multiple milliseconds.
7
9
8
iotest 060:
10
In order to mitigate this let's introduce a new 'EventLoopBase'
9
write to the discarded cluster does not trigger COW anymore.
11
property to set the thread pool size. The threads will be created during
10
Use a backing image instead.
12
the pool's initialization or upon updating the property's value, remain
13
available during its lifetime regardless of demand, and destroyed upon
14
freeing it. A properly characterized workload will then be able to
15
configure the pool to avoid any latency spikes.
11
16
12
Signed-off-by: Anton Nefedov <anton.nefedov@virtuozzo.com>
17
Signed-off-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
13
Message-id: 20190516142749.81019-2-anton.nefedov@virtuozzo.com
18
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
14
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
19
Acked-by: Markus Armbruster <armbru@redhat.com>
15
Reviewed-by: Alberto Garcia <berto@igalia.com>
20
Message-id: 20220425075723.20019-4-nsaenzju@redhat.com
16
Signed-off-by: Max Reitz <mreitz@redhat.com>
21
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
17
---
22
---
18
qapi/block-core.json | 4 +-
23
qapi/qom.json | 10 +++++-
19
block/qcow2.h | 6 +++
24
include/block/aio.h | 10 ++++++
20
block/qcow2-cluster.c | 2 +-
25
include/block/thread-pool.h | 3 ++
21
block/qcow2.c | 85 ++++++++++++++++++++++++++++++++++++++
26
include/sysemu/event-loop-base.h | 4 +++
22
block/trace-events | 1 +
27
event-loop-base.c | 23 +++++++++++++
23
tests/qemu-iotests/060 | 7 +++-
28
iothread.c | 3 ++
24
tests/qemu-iotests/060.out | 5 ++-
29
util/aio-posix.c | 1 +
25
7 files changed, 106 insertions(+), 4 deletions(-)
30
util/async.c | 20 ++++++++++++
31
util/main-loop.c | 9 ++++++
32
util/thread-pool.c | 55 +++++++++++++++++++++++++++++---
33
10 files changed, 133 insertions(+), 5 deletions(-)
26
34
27
diff --git a/qapi/block-core.json b/qapi/block-core.json
35
diff --git a/qapi/qom.json b/qapi/qom.json
28
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
29
--- a/qapi/block-core.json
37
--- a/qapi/qom.json
30
+++ b/qapi/block-core.json
38
+++ b/qapi/qom.json
31
@@ -XXX,XX +XXX,XX @@
39
@@ -XXX,XX +XXX,XX @@
40
# 0 means that the engine will use its default.
41
# (default: 0)
32
#
42
#
33
# @cor_write: a write due to copy-on-read (since 2.11)
43
+# @thread-pool-min: minimum number of threads reserved in the thread pool
34
#
44
+# (default:0)
35
+# @cluster_alloc_space: an allocation of file space for a cluster (since 4.1)
36
+#
45
+#
37
# Since: 2.9
46
+# @thread-pool-max: maximum number of threads the thread pool can contain
47
+# (default:64)
48
+#
49
# Since: 7.1
38
##
50
##
39
{ 'enum': 'BlkdebugEvent', 'prefix': 'BLKDBG',
51
{ 'struct': 'EventLoopBaseProperties',
40
@@ -XXX,XX +XXX,XX @@
52
- 'data': { '*aio-max-batch': 'int' } }
41
'pwritev_rmw_tail', 'pwritev_rmw_after_tail', 'pwritev',
53
+ 'data': { '*aio-max-batch': 'int',
42
'pwritev_zero', 'pwritev_done', 'empty_image_prepare',
54
+ '*thread-pool-min': 'int',
43
'l1_shrink_write_table', 'l1_shrink_free_l2_clusters',
55
+ '*thread-pool-max': 'int' } }
44
- 'cor_write'] }
45
+ 'cor_write', 'cluster_alloc_space'] }
46
56
47
##
57
##
48
# @BlkdebugInjectErrorOptions:
58
# @IothreadProperties:
49
diff --git a/block/qcow2.h b/block/qcow2.h
59
diff --git a/include/block/aio.h b/include/block/aio.h
50
index XXXXXXX..XXXXXXX 100644
60
index XXXXXXX..XXXXXXX 100644
51
--- a/block/qcow2.h
61
--- a/include/block/aio.h
52
+++ b/block/qcow2.h
62
+++ b/include/block/aio.h
53
@@ -XXX,XX +XXX,XX @@ typedef struct QCowL2Meta
63
@@ -XXX,XX +XXX,XX @@ struct AioContext {
64
QSLIST_HEAD(, Coroutine) scheduled_coroutines;
65
QEMUBH *co_schedule_bh;
66
67
+ int thread_pool_min;
68
+ int thread_pool_max;
69
/* Thread pool for performing work and receiving completion callbacks.
70
* Has its own locking.
54
*/
71
*/
55
Qcow2COWRegion cow_end;
72
@@ -XXX,XX +XXX,XX @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
56
73
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
57
+ /*
74
Error **errp);
58
+ * Indicates that COW regions are already handled and do not require
75
59
+ * any more processing.
76
+/**
60
+ */
77
+ * aio_context_set_thread_pool_params:
61
+ bool skip_cow;
78
+ * @ctx: the aio context
62
+
79
+ * @min: min number of threads to have readily available in the thread pool
63
/**
80
+ * @max: max number of threads the thread pool can contain
64
* The I/O vector with the data from the actual guest write request.
81
+ */
65
* If non-NULL, this is meant to be merged together with the data
82
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
66
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
83
+ int64_t max, Error **errp);
67
index XXXXXXX..XXXXXXX 100644
84
#endif
68
--- a/block/qcow2-cluster.c
85
diff --git a/include/block/thread-pool.h b/include/block/thread-pool.h
69
+++ b/block/qcow2-cluster.c
86
index XXXXXXX..XXXXXXX 100644
70
@@ -XXX,XX +XXX,XX @@ static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
87
--- a/include/block/thread-pool.h
71
assert(start->offset + start->nb_bytes <= end->offset);
88
+++ b/include/block/thread-pool.h
72
assert(!m->data_qiov || m->data_qiov->size == data_bytes);
89
@@ -XXX,XX +XXX,XX @@
73
90
74
- if (start->nb_bytes == 0 && end->nb_bytes == 0) {
91
#include "block/block.h"
75
+ if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
92
76
return 0;
93
+#define THREAD_POOL_MAX_THREADS_DEFAULT 64
94
+
95
typedef int ThreadPoolFunc(void *opaque);
96
97
typedef struct ThreadPool ThreadPool;
98
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
99
int coroutine_fn thread_pool_submit_co(ThreadPool *pool,
100
ThreadPoolFunc *func, void *arg);
101
void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg);
102
+void thread_pool_update_params(ThreadPool *pool, struct AioContext *ctx);
103
104
#endif
105
diff --git a/include/sysemu/event-loop-base.h b/include/sysemu/event-loop-base.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/sysemu/event-loop-base.h
108
+++ b/include/sysemu/event-loop-base.h
109
@@ -XXX,XX +XXX,XX @@ struct EventLoopBase {
110
111
/* AioContext AIO engine parameters */
112
int64_t aio_max_batch;
113
+
114
+ /* AioContext thread pool parameters */
115
+ int64_t thread_pool_min;
116
+ int64_t thread_pool_max;
117
};
118
#endif
119
diff --git a/event-loop-base.c b/event-loop-base.c
120
index XXXXXXX..XXXXXXX 100644
121
--- a/event-loop-base.c
122
+++ b/event-loop-base.c
123
@@ -XXX,XX +XXX,XX @@
124
#include "qemu/osdep.h"
125
#include "qom/object_interfaces.h"
126
#include "qapi/error.h"
127
+#include "block/thread-pool.h"
128
#include "sysemu/event-loop-base.h"
129
130
typedef struct {
131
@@ -XXX,XX +XXX,XX @@ typedef struct {
132
ptrdiff_t offset; /* field's byte offset in EventLoopBase struct */
133
} EventLoopBaseParamInfo;
134
135
+static void event_loop_base_instance_init(Object *obj)
136
+{
137
+ EventLoopBase *base = EVENT_LOOP_BASE(obj);
138
+
139
+ base->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
140
+}
141
+
142
static EventLoopBaseParamInfo aio_max_batch_info = {
143
"aio-max-batch", offsetof(EventLoopBase, aio_max_batch),
144
};
145
+static EventLoopBaseParamInfo thread_pool_min_info = {
146
+ "thread-pool-min", offsetof(EventLoopBase, thread_pool_min),
147
+};
148
+static EventLoopBaseParamInfo thread_pool_max_info = {
149
+ "thread-pool-max", offsetof(EventLoopBase, thread_pool_max),
150
+};
151
152
 static void event_loop_base_get_param(Object *obj, Visitor *v,
         const char *name, void *opaque, Error **errp)
@@ -XXX,XX +XXX,XX @@ static void event_loop_base_class_init(ObjectClass *klass, void *class_data)
                               event_loop_base_get_param,
                               event_loop_base_set_param,
                               NULL, &aio_max_batch_info);
+    object_class_property_add(klass, "thread-pool-min", "int",
+                              event_loop_base_get_param,
+                              event_loop_base_set_param,
+                              NULL, &thread_pool_min_info);
+    object_class_property_add(klass, "thread-pool-max", "int",
+                              event_loop_base_get_param,
+                              event_loop_base_set_param,
+                              NULL, &thread_pool_max_info);
 }
 
 static const TypeInfo event_loop_base_info = {
     .name = TYPE_EVENT_LOOP_BASE,
     .parent = TYPE_OBJECT,
     .instance_size = sizeof(EventLoopBase),
+    .instance_init = event_loop_base_instance_init,
     .class_size = sizeof(EventLoopBaseClass),
     .class_init = event_loop_base_class_init,
     .abstract = true,
diff --git a/iothread.c b/iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/iothread.c
+++ b/iothread.c
@@ -XXX,XX +XXX,XX @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
     aio_context_set_aio_params(iothread->ctx,
                                iothread->parent_obj.aio_max_batch,
                                errp);
+
+    aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
+                                       base->thread_pool_max, errp);
 }
 
 
diff --git a/util/aio-posix.c b/util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@
 
 #include "qemu/osdep.h"
 #include "block/block.h"
+#include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
diff --git a/util/async.c b/util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/util/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp)
 
     ctx->aio_max_batch = 0;
 
+    ctx->thread_pool_min = 0;
+    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;
+
     return ctx;
 fail:
     g_source_destroy(&ctx->source);
@@ -XXX,XX +XXX,XX @@ void qemu_set_current_aio_context(AioContext *ctx)
     assert(!get_my_aiocontext());
     set_my_aiocontext(ctx);
 }
+
+void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
+                                        int64_t max, Error **errp)
+{
+
+    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
+        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
+        return;
+    }
+
+    ctx->thread_pool_min = min;
+    ctx->thread_pool_max = max;
+
+    if (ctx->thread_pool) {
+        thread_pool_update_params(ctx->thread_pool, ctx);
+    }
+}
diff --git a/util/main-loop.c b/util/main-loop.c
index XXXXXXX..XXXXXXX 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/replay.h"
 #include "qemu/main-loop.h"
 #include "block/aio.h"
+#include "block/thread-pool.h"
 #include "qemu/error-report.h"
 #include "qemu/queue.h"
 #include "qemu/compiler.h"
@@ -XXX,XX +XXX,XX @@ int qemu_init_main_loop(Error **errp)
 
 static void main_loop_update_params(EventLoopBase *base, Error **errp)
 {
+    ERRP_GUARD();
+
     if (!qemu_aio_context) {
         error_setg(errp, "qemu aio context not ready");
         return;
     }
 
     aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
+    if (*errp) {
+        return;
+    }
+
+    aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
+                                       base->thread_pool_max, errp);
 }
 
 MainLoop *mloop;
diff --git a/util/thread-pool.c b/util/thread-pool.c
index XXXXXXX..XXXXXXX 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
     QemuMutex lock;
     QemuCond worker_stopped;
     QemuSemaphore sem;
-    int max_threads;
     QEMUBH *new_thread_bh;
 
     /* The following variables are only accessed from one AioContext. */
@@ -XXX,XX +XXX,XX @@ struct ThreadPool {
     int new_threads;     /* backlog of threads we need to create */
     int pending_threads; /* threads created but not running yet */
     bool stopping;
+    int min_threads;
+    int max_threads;
 };
 
+static inline bool back_to_sleep(ThreadPool *pool, int ret)
+{
+    /*
+     * The semaphore timed out, we should exit the loop except when:
+     *  - There is work to do, we raced with the signal.
+     *  - The max threads threshold just changed, we raced with the signal.
+     *  - The thread pool forces a minimum number of readily available threads.
+     */
+    if (ret == -1 && (!QTAILQ_EMPTY(&pool->request_list) ||
+        pool->cur_threads > pool->max_threads ||
+        pool->cur_threads <= pool->min_threads)) {
+            return true;
+    }
+
+    return false;
+}
+
 static void *worker_thread(void *opaque)
 {
     ThreadPool *pool = opaque;
@@ -XXX,XX +XXX,XX @@ static void *worker_thread(void *opaque)
         ret = qemu_sem_timedwait(&pool->sem, 10000);
         qemu_mutex_lock(&pool->lock);
         pool->idle_threads--;
-    } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
-    if (ret == -1 || pool->stopping) {
+    } while (back_to_sleep(pool, ret));
+    if (ret == -1 || pool->stopping ||
+        pool->cur_threads > pool->max_threads) {
         break;
     }
 
@@ -XXX,XX +XXX,XX @@ void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
     thread_pool_submit_aio(pool, func, arg, NULL, NULL);
 }
 
+void thread_pool_update_params(ThreadPool *pool, AioContext *ctx)
+{
+    qemu_mutex_lock(&pool->lock);
+
+    pool->min_threads = ctx->thread_pool_min;
+    pool->max_threads = ctx->thread_pool_max;
+
+    /*
+     * We either have to:
+     *  - Increase the number available of threads until over the min_threads
+     *    threshold.
+     *  - Decrease the number of available threads until under the max_threads
+     *    threshold.
+     *  - Do nothing. The current number of threads fall in between the min and
+     *    max thresholds. We'll let the pool manage itself.
+     */
+    for (int i = pool->cur_threads; i < pool->min_threads; i++) {
+        spawn_thread(pool);
+    }
+
+    for (int i = pool->cur_threads; i > pool->max_threads; i--) {
+        qemu_sem_post(&pool->sem);
+    }
+
+    qemu_mutex_unlock(&pool->lock);
+}
+
 static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
 {
     if (!ctx) {
@@ -XXX,XX +XXX,XX @@ static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
     qemu_mutex_init(&pool->lock);
     qemu_cond_init(&pool->worker_stopped);
     qemu_sem_init(&pool->sem, 0);
-    pool->max_threads = 64;
     pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);
 
     QLIST_INIT(&pool->head);
     QTAILQ_INIT(&pool->request_list);
+
+    thread_pool_update_params(pool, ctx);
 }
 
 ThreadPool *thread_pool_new(AioContext *ctx)
--
2.35.1

     }
 
diff --git a/block/qcow2.c b/block/qcow2.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -XXX,XX +XXX,XX @@ static bool merge_cow(uint64_t offset, unsigned bytes,
             continue;
         }
 
+        /* If COW regions are handled already, skip this too */
+        if (m->skip_cow) {
+            continue;
+        }
+
         /* The data (middle) region must be immediately after the
          * start region */
         if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
@@ -XXX,XX +XXX,XX @@ static bool merge_cow(uint64_t offset, unsigned bytes,
     return false;
 }
 
+static bool is_unallocated(BlockDriverState *bs, int64_t offset, int64_t bytes)
+{
+    int64_t nr;
+    return !bytes ||
+        (!bdrv_is_allocated_above(bs, NULL, offset, bytes, &nr) && nr == bytes);
+}
+
+static bool is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
+{
+    /*
+     * This check is designed for optimization shortcut so it must be
+     * efficient.
+     * Instead of is_zero(), use is_unallocated() as it is faster (but not
+     * as accurate and can result in false negatives).
+     */
+    return is_unallocated(bs, m->offset + m->cow_start.offset,
+                          m->cow_start.nb_bytes) &&
+           is_unallocated(bs, m->offset + m->cow_end.offset,
+                          m->cow_end.nb_bytes);
+}
+
+static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
+{
+    BDRVQcow2State *s = bs->opaque;
+    QCowL2Meta *m;
+
+    if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) {
+        return 0;
+    }
+
+    if (bs->encrypted) {
+        return 0;
+    }
+
+    for (m = l2meta; m != NULL; m = m->next) {
+        int ret;
+
+        if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) {
+            continue;
+        }
+
+        if (!is_zero_cow(bs, m)) {
+            continue;
+        }
+
+        /*
+         * instead of writing zero COW buffers,
+         * efficiently zero out the whole clusters
+         */
+
+        ret = qcow2_pre_write_overlap_check(bs, 0, m->alloc_offset,
+                                            m->nb_clusters * s->cluster_size,
+                                            true);
+        if (ret < 0) {
+            return ret;
+        }
+
+        BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
+        ret = bdrv_co_pwrite_zeroes(s->data_file, m->alloc_offset,
+                                    m->nb_clusters * s->cluster_size,
+                                    BDRV_REQ_NO_FALLBACK);
+        if (ret < 0) {
+            if (ret != -ENOTSUP && ret != -EAGAIN) {
+                return ret;
+            }
+            continue;
+        }
+
+        trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters);
+        m->skip_cow = true;
+    }
+    return 0;
+}
+
 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
                                          uint64_t bytes, QEMUIOVector *qiov,
                                          int flags)
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
             qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
         }
 
+        /* Try to efficiently initialize the physical space with zeroes */
+        ret = handle_alloc_space(bs, l2meta);
+        if (ret < 0) {
+            goto out_unlocked;
+        }
+
         /* If we need to do COW, check if it's possible to merge the
          * writing of the guest data together with that of the COW regions.
          * If it's not possible (or not necessary) then write the
diff --git a/block/trace-events b/block/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -XXX,XX +XXX,XX @@ qcow2_writev_done_part(void *co, int cur_bytes) "co %p cur_bytes %d"
 qcow2_writev_data(void *co, uint64_t offset) "co %p offset 0x%" PRIx64
 qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d"
 qcow2_pwrite_zeroes(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d"
+qcow2_skip_cow(void *co, uint64_t offset, int nb_clusters) "co %p offset 0x%" PRIx64 " nb_clusters %d"
 
 # qcow2-cluster.c
 qcow2_alloc_clusters_offset(void *co, uint64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d"
diff --git a/tests/qemu-iotests/060 b/tests/qemu-iotests/060
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/060
+++ b/tests/qemu-iotests/060
@@ -XXX,XX +XXX,XX @@ $QEMU_IO -c "$OPEN_RO" -c "read -P 1 0 512" | _filter_qemu_io
 echo
 echo "=== Testing overlap while COW is in flight ==="
 echo
+BACKING_IMG=$TEST_IMG.base
+TEST_IMG=$BACKING_IMG _make_test_img 1G
+
+$QEMU_IO -c 'write 0k 64k' "$BACKING_IMG" | _filter_qemu_io
+
 # compat=0.10 is required in order to make the following discard actually
 # unallocate the sector rather than make it a zero sector - we want COW, after
 # all.
-IMGOPTS='compat=0.10' _make_test_img 1G
+IMGOPTS='compat=0.10' _make_test_img -b "$BACKING_IMG" 1G
 # Write two clusters, the second one enforces creation of an L2 table after
 # the first data cluster.
 $QEMU_IO -c 'write 0k 64k' -c 'write 512M 64k' "$TEST_IMG" | _filter_qemu_io
diff --git a/tests/qemu-iotests/060.out b/tests/qemu-iotests/060.out
index XXXXXXX..XXXXXXX 100644
--- a/tests/qemu-iotests/060.out
+++ b/tests/qemu-iotests/060.out
@@ -XXX,XX +XXX,XX @@ read 512/512 bytes at offset 0
 
 === Testing overlap while COW is in flight ===
 
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
+Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=1073741824
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824 backing_file=TEST_DIR/t.IMGFMT.base
 wrote 65536/65536 bytes at offset 0
 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 wrote 65536/65536 bytes at offset 536870912
--
2.21.0

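For anyone who wants to try the new knobs, here is a rough usage sketch. It is not part of the series; only the thread-pool-min and thread-pool-max property names come from the patch above, while the command line, image path and values are illustrative:

# Illustrative only: cap an iothread's async-I/O worker pool at 16 threads
# and keep at least 2 of them ready; the blockdev/device wiring is made up.
qemu-system-x86_64 \
    -object iothread,id=iothread0,thread-pool-min=2,thread-pool-max=16 \
    -blockdev driver=qcow2,node-name=disk0,file.driver=file,file.filename=disk.qcow2 \
    -device virtio-blk-pci,drive=disk0,iothread=iothread0

The same two properties are registered on the event-loop-base parent of the main loop as well (main_loop_update_params() above), so the default AioContext's pool can presumably be tuned the same way through the new main-loop object once the series is applied.
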
diff view generated by jsdifflib
Deleted patch
From: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>

Valgrind detects multiple issues in QEMU iotests when memory is used
without being initialized. Valgrind may dump lots of unnecessary
reports, which makes memory issue analysis harder. That is particularly
true for the aligned bitmap directory and can be seen while running
iotest #169. Padding the aligned space with zeros eases the pain.

Signed-off-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Message-id: 1558961521-131620-1-git-send-email-andrey.shinkevich@virtuozzo.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/qcow2-bitmap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/qcow2-bitmap.c b/block/qcow2-bitmap.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2-bitmap.c
+++ b/block/qcow2-bitmap.c
@@ -XXX,XX +XXX,XX @@ static int bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list,
         dir_offset = *offset;
     }
 
-    dir = g_try_malloc(dir_size);
+    dir = g_try_malloc0(dir_size);
     if (dir == NULL) {
         return -ENOMEM;
     }
--
2.21.0

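As a reproduction sketch (not from the patch, and assuming the iotests harness's VALGRIND_QEMU hook is present in the checked-out tree), the reports the message refers to can typically be observed like this:

# Illustrative: run iotest 169 with the QEMU binaries wrapped in valgrind;
# the exact switch may differ between QEMU releases.
cd tests/qemu-iotests
VALGRIND_QEMU=y ./check -qcow2 169

With g_try_malloc0(), the padding bytes of the aligned bitmap directory are defined, so valgrind no longer flags them as uninitialized when the directory is written out to the image.
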
diff view generated by jsdifflib
Deleted patch
From: John Snow <jsnow@redhat.com>

We mandate that the source node must be a root node, but there is no
reason I am aware of that it needs to be restricted to such. In some
cases we need to make sure that a medium is present, but in the general
case we can let the backup job itself do the graph checking.

This patch improves the error message you get when you try to back up
from the same node more than once, which is reflected in the change to
test 056.

For backups with bitmaps, it also reports that the bitmap is in use
instead of giving you something cryptic like "need a root node."

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1707303
Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20190521210053.8864-1-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 blockdev.c             | 7 ++++++-
 tests/qemu-iotests/056 | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/blockdev.c b/blockdev.c
index XXXXXXX..XXXXXXX 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -XXX,XX +XXX,XX @@ static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn,
         backup->compress = false;
     }
 
-    bs = qmp_get_root_bs(backup->device, errp);
+    bs = bdrv_lookup_bs(backup->device, backup->device, errp);
     if (!bs) {
         return NULL;
     }
 
+    if (!bs->drv) {
+        error_setg(errp, "Device has no medium");
+        return NULL;
+    }
+
     aio_context = bdrv_get_aio_context(bs);
     aio_context_acquire(aio_context);
 
diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056
index XXXXXXX..XXXXXXX 100755
--- a/tests/qemu-iotests/056
+++ b/tests/qemu-iotests/056
@@ -XXX,XX +XXX,XX @@ class BackupTest(iotests.QMPTestCase):
         res = self.vm.qmp('query-block-jobs')
         self.assert_qmp(res, 'return[0]/status', 'concluded')
         # Leave zombie job un-dismissed, observe a failure:
-        res = self.qmp_backup_and_wait(serror='Need a root block node',
+        res = self.qmp_backup_and_wait(serror="Node 'drive0' is busy: block device is in use by block job: backup",
                                        device='drive0', format=iotests.imgfmt,
                                        sync='full', target=self.dest_img,
                                        auto_dismiss=False)
--
2.21.0

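A quick way to see the behaviour change is to drive drive-backup over QMP against a named node. This is only a sketch: node names, image paths and values are made up, and only the error-message wording comes from the test update above.

# Illustrative: back up the same node twice. The first job is kept around
# with auto-dismiss=false, so the second request should now fail with a
# "node is busy" style error instead of "Need a root block node".
qemu-img create -f qcow2 disk.qcow2 64M
printf '%s\n' \
    '{"execute": "qmp_capabilities"}' \
    '{"execute": "drive-backup", "arguments": {"device": "drive0", "sync": "full", "target": "backup0.qcow2", "auto-dismiss": false}}' \
    '{"execute": "drive-backup", "arguments": {"device": "drive0", "sync": "full", "target": "backup1.qcow2"}}' \
    '{"execute": "quit"}' \
  | qemu-system-x86_64 -nodefaults -display none -qmp stdio \
      -blockdev driver=qcow2,node-name=drive0,file.driver=file,file.filename=disk.qcow2
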
diff view generated by jsdifflib