The following changes since commit 506e4a00de01e0b29fa83db5cbbc3d154253b4ea:

  Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-3.1-20180925' into staging (2018-09-25 13:30:45 +0100)

are available in the Git repository at:

  https://git.xanclic.moe/XanClic/qemu.git tags/pull-block-2018-09-25

for you to fetch changes up to 9c76ff9c16be890e70fce30754b096ff9950d1ee:

  Merge remote-tracking branch 'kevin/tags/for-upstream' into block (2018-09-25 16:12:44 +0200)

----------------------------------------------------------------
Block layer patches:

- Drain fixes
- node-name parameters for block-commit
- Refactor block jobs to use transactional callbacks for exiting

----------------------------------------------------------------
Alberto Garcia (2):
      block: Fix use after free error in bdrv_open_inherit()
      qemu-iotests: Test snapshot=on with nonexistent TMPDIR

Fam Zheng (1):
      job: Fix nested aio_poll() hanging in job_txn_apply

John Snow (16):
      block/commit: add block job creation flags
      block/mirror: add block job creation flags
      block/stream: add block job creation flags
      block/commit: refactor commit to use job callbacks
      block/mirror: don't install backing chain on abort
      block/mirror: conservative mirror_exit refactor
      block/stream: refactor stream to use job callbacks
      tests/blockjob: replace Blockjob with Job
      tests/test-blockjob: remove exit callback
      tests/test-blockjob-txn: move .exit to .clean
      jobs: remove .exit callback
      qapi/block-commit: expose new job properties
      qapi/block-mirror: expose new job properties
      qapi/block-stream: expose new job properties
      block/backup: qapi documentation fixup
      blockdev: document transactional shortcomings

Kevin Wolf (21):
      commit: Add top-node/base-node options
      qemu-iotests: Test commit with top-node/base-node
      job: Fix missing locking due to mismerge
      blockjob: Wake up BDS when job becomes idle
      aio-wait: Increase num_waiters even in home thread
      test-bdrv-drain: Drain with block jobs in an I/O thread
      test-blockjob: Acquire AioContext around job_cancel_sync()
      job: Use AIO_WAIT_WHILE() in job_finish_sync()
      test-bdrv-drain: Test AIO_WAIT_WHILE() in completion callback
      block: Add missing locking in bdrv_co_drain_bh_cb()
      block-backend: Add .drained_poll callback
      block-backend: Fix potential double blk_delete()
      block-backend: Decrease in_flight only after callback
      blockjob: Lie better in child_job_drained_poll()
      block: Remove aio_poll() in bdrv_drain_poll variants
      test-bdrv-drain: Test nested poll in bdrv_drain_poll_top_level()
      job: Avoid deadlocks in job_completed_txn_abort()
      test-bdrv-drain: AIO_WAIT_WHILE() in job .commit/.abort
      test-bdrv-drain: Fix outdated comments
      block: Use a single global AioWait
      test-bdrv-drain: Test draining job source child and parent

Max Reitz (1):
      Merge remote-tracking branch 'kevin/tags/for-upstream' into block

Sergio Lopez (2):
      block/linux-aio: acquire AioContext before qemu_laio_process_completions
      util/async: use qemu_aio_coroutine_enter in co_schedule_bh_cb

 qapi/block-core.json          | 104 ++++++++++++---
 include/block/aio-wait.h      |  28 ++--
 include/block/block.h         |   6 +-
 include/block/block_int.h     |  18 ++-
 include/block/blockjob.h      |   3 +
 include/qemu/coroutine.h      |   5 +
 include/qemu/job.h            |  23 ++--
 block.c                       |   6 +-
 block/block-backend.c         |  31 +++--
 block/commit.c                |  97 ++++++------
 block/io.c                    |  30 +++--
 block/linux-aio.c             |   2 +-
 block/mirror.c                |  49 +++++--
 block/stream.c                |  28 ++--
 blockdev.c                    |  84 ++++++++++--
 blockjob.c                    |   9 +-
 hmp.c                         |   5 +-
 job.c                         | 144 +++++++++----------
 tests/test-bdrv-drain.c       | 294 +++++++++++++++++++++++++++++++++++++++---
 tests/test-blockjob-txn.c     |   4 +-
 tests/test-blockjob.c         | 120 ++++++---------
 util/aio-wait.c               |  11 +-
 util/async.c                  |   2 +-
 util/qemu-coroutine.c         |   5 +
 tests/qemu-iotests/040        |  52 +++++++-
 tests/qemu-iotests/040.out    |   4 +-
 tests/qemu-iotests/051        |   3 +
 tests/qemu-iotests/051.out    |   3 +
 tests/qemu-iotests/051.pc.out |   3 +
 29 files changed, 856 insertions(+), 317 deletions(-)

--
2.17.1


The following changes since commit eaefea537b476cb853e2edbdc68e969ec777e4bb:

  Merge remote-tracking branch 'remotes/mjt/tags/trivial-patches-fetch' into staging (2017-12-18 14:17:42 +0000)

are available in the Git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 7a9dda0d7f9831c2432620dcfefdadbb7ae888dc:

  qemu-iotests: add 203 savevm with IOThreads test (2017-12-19 10:25:09 +0000)

----------------------------------------------------------------
Pull request

v2:
 * Fixed incorrect virtio_blk_data_plane_create() local_err refactoring in
   "hw/block: Use errp directly rather than local_err" that broke virtio-blk
   over virtio-mmio [Peter]

----------------------------------------------------------------
Mao Zhongyi (4):
      hw/block/nvme: Convert to realize
      hw/block: Fix the return type
      hw/block: Use errp directly rather than local_err
      dev-storage: Fix the unusual function name

Mark Kanda (2):
      virtio-blk: make queue size configurable
      virtio-blk: reject configs with logical block size > physical block size

Paolo Bonzini (1):
      block: avoid recursive AioContext acquire in bdrv_inactivate_all()

Stefan Hajnoczi (16):
      coroutine: simplify co_aio_sleep_ns() prototype
      qdev: drop unused #include "sysemu/iothread.h"
      blockdev: hold AioContext for bdrv_unref() in external_snapshot_clean()
      block: don't keep AioContext acquired after external_snapshot_prepare()
      block: don't keep AioContext acquired after drive_backup_prepare()
      block: don't keep AioContext acquired after blockdev_backup_prepare()
      block: don't keep AioContext acquired after internal_snapshot_prepare()
      block: drop unused BlockDirtyBitmapState->aio_context field
      iothread: add iothread_by_id() API
      blockdev: add x-blockdev-set-iothread testing command
      qemu-iotests: add 202 external snapshots IOThread test
      docs: mark nested AioContext locking as a legacy API
      blockdev: add x-blockdev-set-iothread force boolean
      iotests: add VM.add_object()
      iothread: fix iothread_stop() race condition
      qemu-iotests: add 203 savevm with IOThreads test

 docs/devel/multiple-iothreads.txt |   7 +-
 qapi/block-core.json              |  40 ++++++
 hw/block/dataplane/virtio-blk.h   |   2 +-
 include/hw/block/block.h          |   4 +-
 include/hw/virtio/virtio-blk.h    |   1 +
 include/qemu/coroutine.h          |   6 +-
 include/sysemu/iothread.h         |   4 +-
 block.c                           |  14 ++-
 block/null.c                      |   3 +-
 block/sheepdog.c                  |   3 +-
 blockdev.c                        | 259 +++++++++++++++++++++++++++-----------
 hw/block/block.c                  |  15 ++-
 hw/block/dataplane/virtio-blk.c   |  12 +-
 hw/block/fdc.c                    |  17 +--
 hw/block/nvme.c                   |  23 ++--
 hw/block/virtio-blk.c             |  30 +++--
 hw/core/qdev-properties-system.c  |   1 -
 hw/ide/qdev.c                     |  12 +-
 hw/scsi/scsi-disk.c               |  13 +-
 hw/usb/dev-storage.c              |  29 ++---
 iothread.c                        |  27 +++-
 util/qemu-coroutine-sleep.c       |   4 +-
 tests/qemu-iotests/202            |  95 ++++++++++++++
 tests/qemu-iotests/202.out        |  11 ++
 tests/qemu-iotests/203            |  59 +++++++++
 tests/qemu-iotests/203.out        |   6 +
 tests/qemu-iotests/group          |   2 +
 tests/qemu-iotests/iotests.py     |   5 +
 28 files changed, 531 insertions(+), 173 deletions(-)
 create mode 100755 tests/qemu-iotests/202
 create mode 100644 tests/qemu-iotests/202.out
 create mode 100755 tests/qemu-iotests/203
 create mode 100644 tests/qemu-iotests/203.out

--
2.14.3
diff view generated by jsdifflib
1
From: Kevin Wolf <kwolf@redhat.com>

bdrv_do_drained_begin/end() assume that they are called with the
AioContext lock of bs held. If we call drain functions from a coroutine
with the AioContext lock held, we yield and schedule a BH to move out of
coroutine context. This means that the lock for the home context of the
coroutine is released and must be re-acquired in the bottom half.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---

The AioContext pointer argument to co_aio_sleep_ns() is only used for
the sleep timer. It does not affect where the caller coroutine is
resumed.

Due to changes to coroutine and AIO APIs it is now possible to drop the
AioContext pointer argument. This is safe to do since no caller has
specific requirements for which AioContext the timer must run in.

This patch drops the AioContext pointer argument and renames the
function to simplify the API.

Reported-by: Paolo Bonzini <pbonzini@redhat.com>
Reported-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20171109102652.6360-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
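As a quick illustration of the API change above (a sketch only, lifted
from the block/null.c hunk below; the latency_ns field belongs to that
driver), a caller that used to pass the timer's AioContext explicitly
now simply sleeps on the clock:

    /* before: the timer's AioContext had to be passed explicitly */
    co_aio_sleep_ns(bdrv_get_aio_context(bs), QEMU_CLOCK_REALTIME, s->latency_ns);

    /* after: the current coroutine's AioContext is used implicitly */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, s->latency_ns);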
12
include/qemu/coroutine.h | 5 +++++
19
include/qemu/coroutine.h | 6 +-----
13
block/io.c | 15 +++++++++++++++
20
block/null.c | 3 +--
14
util/qemu-coroutine.c | 5 +++++
21
block/sheepdog.c | 3 +--
15
3 files changed, 25 insertions(+)
22
util/qemu-coroutine-sleep.c | 4 ++--
23
4 files changed, 5 insertions(+), 11 deletions(-)
16
24
17
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
25
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
18
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
19
--- a/include/qemu/coroutine.h
27
--- a/include/qemu/coroutine.h
20
+++ b/include/qemu/coroutine.h
28
+++ b/include/qemu/coroutine.h
21
@@ -XXX,XX +XXX,XX @@ void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co);
29
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock);
30
31
/**
32
* Yield the coroutine for a given duration
33
- *
34
- * Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be
35
- * resumed when using aio_poll().
22
*/
36
*/
23
void coroutine_fn qemu_coroutine_yield(void);
37
-void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
24
38
- int64_t ns);
25
+/**
39
+void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns);
26
+ * Get the AioContext of the given coroutine
40
27
+ */
28
+AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
29
+
30
/**
41
/**
31
* Get the currently executing coroutine
42
* Yield until a file descriptor becomes readable
32
*/
43
diff --git a/block/null.c b/block/null.c
33
diff --git a/block/io.c b/block/io.c
34
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
35
--- a/block/io.c
45
--- a/block/null.c
36
+++ b/block/io.c
46
+++ b/block/null.c
37
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
47
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int null_co_common(BlockDriverState *bs)
38
BlockDriverState *bs = data->bs;
48
BDRVNullState *s = bs->opaque;
39
49
40
if (bs) {
50
if (s->latency_ns) {
41
+ AioContext *ctx = bdrv_get_aio_context(bs);
51
- co_aio_sleep_ns(bdrv_get_aio_context(bs), QEMU_CLOCK_REALTIME,
42
+ AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
52
- s->latency_ns);
43
+
53
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, s->latency_ns);
44
+ /*
54
}
45
+ * When the coroutine yielded, the lock for its home context was
55
return 0;
46
+ * released, so we need to re-acquire it here. If it explicitly
56
}
47
+ * acquired a different context, the lock is still held and we don't
57
diff --git a/block/sheepdog.c b/block/sheepdog.c
48
+ * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
58
index XXXXXXX..XXXXXXX 100644
49
+ */
59
--- a/block/sheepdog.c
50
+ if (ctx == co_ctx) {
60
+++ b/block/sheepdog.c
51
+ aio_context_acquire(ctx);
61
@@ -XXX,XX +XXX,XX @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
52
+ }
62
if (s->fd < 0) {
53
bdrv_dec_in_flight(bs);
63
DPRINTF("Wait for connection to be established\n");
54
if (data->begin) {
64
error_report_err(local_err);
55
bdrv_do_drained_begin(bs, data->recursive, data->parent,
65
- co_aio_sleep_ns(bdrv_get_aio_context(s->bs), QEMU_CLOCK_REALTIME,
56
@@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque)
66
- 1000000000ULL);
57
bdrv_do_drained_end(bs, data->recursive, data->parent,
67
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000000ULL);
58
data->ignore_bds_parents);
59
}
68
}
60
+ if (ctx == co_ctx) {
69
};
61
+ aio_context_release(ctx);
70
62
+ }
71
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
63
} else {
64
assert(data->begin);
65
bdrv_drain_all_begin();
66
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
67
index XXXXXXX..XXXXXXX 100644
72
index XXXXXXX..XXXXXXX 100644
68
--- a/util/qemu-coroutine.c
73
--- a/util/qemu-coroutine-sleep.c
69
+++ b/util/qemu-coroutine.c
74
+++ b/util/qemu-coroutine-sleep.c
70
@@ -XXX,XX +XXX,XX @@ bool qemu_coroutine_entered(Coroutine *co)
75
@@ -XXX,XX +XXX,XX @@ static void co_sleep_cb(void *opaque)
76
aio_co_wake(sleep_cb->co);
77
}
78
79
-void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
80
- int64_t ns)
81
+void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
71
{
82
{
72
return co->caller;
83
+ AioContext *ctx = qemu_get_current_aio_context();
73
}
84
CoSleepCB sleep_cb = {
74
+
85
.co = qemu_coroutine_self(),
75
+AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
86
};
76
+{
77
+ return co->ctx;
78
+}
79
--
87
--
80
2.17.1
88
2.14.3
81
89
82
90
diff view generated by jsdifflib
1
From: Kevin Wolf <kwolf@redhat.com>

This adds tests for calling AIO_WAIT_WHILE() in the .commit and .abort
callbacks. Both reasons why .abort could be called for a single job are
tested: Either .run or .prepare could return an error.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---

From: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>

Convert nvme_init() to realize and rename it to nvme_realize().

Cc: John Snow <jsnow@redhat.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
Cc: Markus Armbruster <armbru@redhat.com>

Signed-off-by: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>
Message-id: 2882e72d795e04cbe2120f569d551aef2467ac60.1511317952.git.maozy.fnst@cn.fujitsu.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
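For readers unfamiliar with the realize convention, the shape of the
conversion (a sketch distilled from the hw/block/nvme.c hunks below, not
a complete function) is that the init callback's int return code is
replaced by a void realize callback that reports failure through errp:

    static void nvme_realize(PCIDevice *pci_dev, Error **errp)
    {
        NvmeCtrl *n = NVME(pci_dev);

        if (!n->conf.blk) {
            /* previously: return -1; */
            error_setg(errp, "drive property not set");
            return;
        }
        /* ... remaining setup unchanged ... */
    }

    /* class init: pc->init = nvme_init  becomes  pc->realize = nvme_realize */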
10
tests/test-bdrv-drain.c | 116 +++++++++++++++++++++++++++++++++++-----
15
hw/block/nvme.c | 18 ++++++++++--------
11
1 file changed, 104 insertions(+), 12 deletions(-)
16
1 file changed, 10 insertions(+), 8 deletions(-)
12
17
13
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
18
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
14
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
15
--- a/tests/test-bdrv-drain.c
20
--- a/hw/block/nvme.c
16
+++ b/tests/test-bdrv-drain.c
21
+++ b/hw/block/nvme.c
17
@@ -XXX,XX +XXX,XX @@ static void test_iothread_drain_subtree(void)
22
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps nvme_cmb_ops = {
18
19
typedef struct TestBlockJob {
20
BlockJob common;
21
+ int run_ret;
22
+ int prepare_ret;
23
bool should_complete;
24
} TestBlockJob;
25
26
@@ -XXX,XX +XXX,XX @@ static int test_job_prepare(Job *job)
27
28
/* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
29
blk_flush(s->common.blk);
30
- return 0;
31
+ return s->prepare_ret;
32
+}
33
+
34
+static void test_job_commit(Job *job)
35
+{
36
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
37
+
38
+ /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
39
+ blk_flush(s->common.blk);
40
+}
41
+
42
+static void test_job_abort(Job *job)
43
+{
44
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
45
+
46
+ /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
47
+ blk_flush(s->common.blk);
48
}
49
50
static int coroutine_fn test_job_run(Job *job, Error **errp)
51
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
52
job_pause_point(&s->common.job);
53
}
54
55
- return 0;
56
+ return s->run_ret;
57
}
58
59
static void test_job_complete(Job *job, Error **errp)
60
@@ -XXX,XX +XXX,XX @@ BlockJobDriver test_job_driver = {
61
.run = test_job_run,
62
.complete = test_job_complete,
63
.prepare = test_job_prepare,
64
+ .commit = test_job_commit,
65
+ .abort = test_job_abort,
66
},
23
},
67
};
24
};
68
25
69
-static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
26
-static int nvme_init(PCIDevice *pci_dev)
70
+enum test_job_result {
27
+static void nvme_realize(PCIDevice *pci_dev, Error **errp)
71
+ TEST_JOB_SUCCESS,
72
+ TEST_JOB_FAIL_RUN,
73
+ TEST_JOB_FAIL_PREPARE,
74
+};
75
+
76
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
77
+ enum test_job_result result)
78
{
28
{
79
BlockBackend *blk_src, *blk_target;
29
NvmeCtrl *n = NVME(pci_dev);
80
BlockDriverState *src, *target;
30
NvmeIdCtrl *id = &n->id_ctrl;
81
BlockJob *job;
31
@@ -XXX,XX +XXX,XX @@ static int nvme_init(PCIDevice *pci_dev)
82
+ TestBlockJob *tjob;
32
Error *local_err = NULL;
83
IOThread *iothread = NULL;
33
84
AioContext *ctx;
34
if (!n->conf.blk) {
85
int ret;
35
- return -1;
86
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
36
+ error_setg(errp, "drive property not set");
87
blk_insert_bs(blk_target, target, &error_abort);
37
+ return;
88
38
}
89
aio_context_acquire(ctx);
39
90
- job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
40
bs_size = blk_getlength(n->conf.blk);
91
- 0, 0, NULL, NULL, &error_abort);
41
if (bs_size < 0) {
92
+ tjob = block_job_create("job0", &test_job_driver, NULL, src,
42
- return -1;
93
+ 0, BLK_PERM_ALL,
43
+ error_setg(errp, "could not get backing file size");
94
+ 0, 0, NULL, NULL, &error_abort);
44
+ return;
95
+ job = &tjob->common;
45
}
96
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
46
97
+
47
blkconf_serial(&n->conf, &n->serial);
98
+ switch (result) {
48
if (!n->serial) {
99
+ case TEST_JOB_SUCCESS:
49
- return -1;
100
+ break;
50
+ error_setg(errp, "serial property not set");
101
+ case TEST_JOB_FAIL_RUN:
51
+ return;
102
+ tjob->run_ret = -EIO;
52
}
103
+ break;
53
blkconf_blocksizes(&n->conf);
104
+ case TEST_JOB_FAIL_PREPARE:
54
blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
105
+ tjob->prepare_ret = -EIO;
55
false, &local_err);
106
+ break;
56
if (local_err) {
107
+ }
57
- error_report_err(local_err);
108
+
58
- return -1;
109
job_start(&job->job);
59
+ error_propagate(errp, local_err);
110
aio_context_release(ctx);
60
+ return;
111
61
}
112
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
62
113
63
pci_conf = pci_dev->config;
114
aio_context_acquire(ctx);
64
@@ -XXX,XX +XXX,XX @@ static int nvme_init(PCIDevice *pci_dev)
115
ret = job_complete_sync(&job->job, &error_abort);
65
cpu_to_le64(n->ns_size >>
116
- g_assert_cmpint(ret, ==, 0);
66
id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
117
+ g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
67
}
118
68
- return 0;
119
if (use_iothread) {
120
blk_set_aio_context(blk_src, qemu_get_aio_context());
121
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
122
123
static void test_blockjob_drain_all(void)
124
{
125
- test_blockjob_common(BDRV_DRAIN_ALL, false);
126
+ test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
127
}
69
}
128
70
129
static void test_blockjob_drain(void)
71
static void nvme_exit(PCIDevice *pci_dev)
130
{
72
@@ -XXX,XX +XXX,XX @@ static void nvme_class_init(ObjectClass *oc, void *data)
131
- test_blockjob_common(BDRV_DRAIN, false);
73
DeviceClass *dc = DEVICE_CLASS(oc);
132
+ test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
74
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
133
}
75
134
76
- pc->init = nvme_init;
135
static void test_blockjob_drain_subtree(void)
77
+ pc->realize = nvme_realize;
136
{
78
pc->exit = nvme_exit;
137
- test_blockjob_common(BDRV_SUBTREE_DRAIN, false);
79
pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
138
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
80
pc->vendor_id = PCI_VENDOR_ID_INTEL;
139
+}
140
+
141
+static void test_blockjob_error_drain_all(void)
142
+{
143
+ test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
144
+ test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
145
+}
146
+
147
+static void test_blockjob_error_drain(void)
148
+{
149
+ test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
150
+ test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
151
+}
152
+
153
+static void test_blockjob_error_drain_subtree(void)
154
+{
155
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
156
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
157
}
158
159
static void test_blockjob_iothread_drain_all(void)
160
{
161
- test_blockjob_common(BDRV_DRAIN_ALL, true);
162
+ test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
163
}
164
165
static void test_blockjob_iothread_drain(void)
166
{
167
- test_blockjob_common(BDRV_DRAIN, true);
168
+ test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
169
}
170
171
static void test_blockjob_iothread_drain_subtree(void)
172
{
173
- test_blockjob_common(BDRV_SUBTREE_DRAIN, true);
174
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
175
+}
176
+
177
+static void test_blockjob_iothread_error_drain_all(void)
178
+{
179
+ test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
180
+ test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
181
+}
182
+
183
+static void test_blockjob_iothread_error_drain(void)
184
+{
185
+ test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
186
+ test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
187
+}
188
+
189
+static void test_blockjob_iothread_error_drain_subtree(void)
190
+{
191
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
192
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
193
}
194
195
196
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
197
g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
198
test_blockjob_drain_subtree);
199
200
+ g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
201
+ test_blockjob_error_drain_all);
202
+ g_test_add_func("/bdrv-drain/blockjob/error/drain",
203
+ test_blockjob_error_drain);
204
+ g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree",
205
+ test_blockjob_error_drain_subtree);
206
+
207
g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
208
test_blockjob_iothread_drain_all);
209
g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
210
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
211
g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
212
test_blockjob_iothread_drain_subtree);
213
214
+ g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
215
+ test_blockjob_iothread_error_drain_all);
216
+ g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
217
+ test_blockjob_iothread_error_drain);
218
+ g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree",
219
+ test_blockjob_iothread_error_drain_subtree);
220
+
221
g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
222
g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
223
g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
224
--
81
--
225
2.17.1
82
2.14.3
226
83
227
84
diff view generated by jsdifflib
1
From: John Snow <jsnow@redhat.com>

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-8-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---

From: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>

When a function has no success value to transmit, it is usually made to
return void. That has turned out to be a mistake here, because it forces
callers to use an extra local_err variable and error_propagate(), which
leads to cumbersome code; transmitting success/failure in the return
value is worth it.

So fix the return type of blkconf_apply_backend_options(),
blkconf_geometry() and virtio_blk_data_plane_create() to avoid it.

Cc: John Snow <jsnow@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>

Signed-off-by: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: ac0edc1fc70c4457e5cec94405eb7d1f89f9c2c1.1511317952.git.maozy.fnst@cn.fujitsu.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
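The calling convention this enables (a sketch mirroring the caller
conversions the follow-up "Use errp directly" patch makes, for example
in hw/block/virtio-blk.c) lets a realize function pass its errp straight
through and branch on the bool result instead of juggling a local Error:

    /* before */
    Error *err = NULL;
    blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* after */
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }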
9
block/stream.c | 23 +++++++++++++++--------
22
hw/block/dataplane/virtio-blk.h | 2 +-
10
1 file changed, 15 insertions(+), 8 deletions(-)
23
include/hw/block/block.h | 4 ++--
24
hw/block/block.c | 15 +++++++++------
25
hw/block/dataplane/virtio-blk.c | 12 +++++++-----
26
4 files changed, 19 insertions(+), 14 deletions(-)
11
27
12
diff --git a/block/stream.c b/block/stream.c
28
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
13
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
14
--- a/block/stream.c
30
--- a/hw/block/dataplane/virtio-blk.h
15
+++ b/block/stream.c
31
+++ b/hw/block/dataplane/virtio-blk.h
16
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn stream_populate(BlockBackend *blk,
32
@@ -XXX,XX +XXX,XX @@
17
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
33
34
typedef struct VirtIOBlockDataPlane VirtIOBlockDataPlane;
35
36
-void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
37
+bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
38
VirtIOBlockDataPlane **dataplane,
39
Error **errp);
40
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
41
diff --git a/include/hw/block/block.h b/include/hw/block/block.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/include/hw/block/block.h
44
+++ b/include/hw/block/block.h
45
@@ -XXX,XX +XXX,XX @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
46
/* Configuration helpers */
47
48
void blkconf_serial(BlockConf *conf, char **serial);
49
-void blkconf_geometry(BlockConf *conf, int *trans,
50
+bool blkconf_geometry(BlockConf *conf, int *trans,
51
unsigned cyls_max, unsigned heads_max, unsigned secs_max,
52
Error **errp);
53
void blkconf_blocksizes(BlockConf *conf);
54
-void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
55
+bool blkconf_apply_backend_options(BlockConf *conf, bool readonly,
56
bool resizable, Error **errp);
57
58
/* Hard disk geometry */
59
diff --git a/hw/block/block.c b/hw/block/block.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/hw/block/block.c
62
+++ b/hw/block/block.c
63
@@ -XXX,XX +XXX,XX @@ void blkconf_blocksizes(BlockConf *conf)
64
}
18
}
65
}
19
66
20
-static void stream_exit(Job *job)
67
-void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
21
+static int stream_prepare(Job *job)
68
+bool blkconf_apply_backend_options(BlockConf *conf, bool readonly,
69
bool resizable, Error **errp)
22
{
70
{
23
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
71
BlockBackend *blk = conf->blk;
24
BlockJob *bjob = &s->common;
72
@@ -XXX,XX +XXX,XX @@ void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
25
BlockDriverState *bs = blk_bs(bjob->blk);
73
26
BlockDriverState *base = s->base;
74
ret = blk_set_perm(blk, perm, shared_perm, errp);
27
Error *local_err = NULL;
75
if (ret < 0) {
28
- int ret = job->ret;
76
- return;
29
+ int ret = 0;
77
+ return false;
30
78
}
31
- if (!job_is_cancelled(job) && bs->backing && ret == 0) {
79
32
+ if (bs->backing) {
80
switch (conf->wce) {
33
const char *base_id = NULL, *base_fmt = NULL;
81
@@ -XXX,XX +XXX,XX @@ void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
34
if (base) {
82
35
base_id = s->backing_file_str;
83
blk_set_enable_write_cache(blk, wce);
36
@@ -XXX,XX +XXX,XX @@ static void stream_exit(Job *job)
84
blk_set_on_error(blk, rerror, werror);
37
bdrv_set_backing_hd(bs, base, &local_err);
85
+
38
if (local_err) {
86
+ return true;
39
error_report_err(local_err);
87
}
40
- ret = -EPERM;
88
41
- goto out;
89
-void blkconf_geometry(BlockConf *conf, int *ptrans,
42
+ return -EPERM;
90
+bool blkconf_geometry(BlockConf *conf, int *ptrans,
91
unsigned cyls_max, unsigned heads_max, unsigned secs_max,
92
Error **errp)
93
{
94
@@ -XXX,XX +XXX,XX @@ void blkconf_geometry(BlockConf *conf, int *ptrans,
95
if (conf->cyls || conf->heads || conf->secs) {
96
if (conf->cyls < 1 || conf->cyls > cyls_max) {
97
error_setg(errp, "cyls must be between 1 and %u", cyls_max);
98
- return;
99
+ return false;
100
}
101
if (conf->heads < 1 || conf->heads > heads_max) {
102
error_setg(errp, "heads must be between 1 and %u", heads_max);
103
- return;
104
+ return false;
105
}
106
if (conf->secs < 1 || conf->secs > secs_max) {
107
error_setg(errp, "secs must be between 1 and %u", secs_max);
108
- return;
109
+ return false;
43
}
110
}
44
}
111
}
45
112
+ return true;
46
-out:
113
}
47
+ return ret;
114
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
48
+}
115
index XXXXXXX..XXXXXXX 100644
116
--- a/hw/block/dataplane/virtio-blk.c
117
+++ b/hw/block/dataplane/virtio-blk.c
118
@@ -XXX,XX +XXX,XX @@ static void notify_guest_bh(void *opaque)
119
}
120
121
/* Context: QEMU global mutex held */
122
-void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
123
+bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
124
VirtIOBlockDataPlane **dataplane,
125
Error **errp)
126
{
127
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
128
error_setg(errp,
129
"device is incompatible with iothread "
130
"(transport does not support notifiers)");
131
- return;
132
+ return false;
133
}
134
if (!virtio_device_ioeventfd_enabled(vdev)) {
135
error_setg(errp, "ioeventfd is required for iothread");
136
- return;
137
+ return false;
138
}
139
140
/* If dataplane is (re-)enabled while the guest is running there could
141
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
142
*/
143
if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
144
error_prepend(errp, "cannot start virtio-blk dataplane: ");
145
- return;
146
+ return false;
147
}
148
}
149
/* Don't try if transport does not support notifiers. */
150
if (!virtio_device_ioeventfd_enabled(vdev)) {
151
- return;
152
+ return false;
153
}
154
155
s = g_new0(VirtIOBlockDataPlane, 1);
156
@@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
157
s->batch_notify_vqs = bitmap_new(conf->num_queues);
158
159
*dataplane = s;
49
+
160
+
50
+static void stream_clean(Job *job)
161
+ return true;
51
+{
52
+ StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
53
+ BlockJob *bjob = &s->common;
54
+ BlockDriverState *bs = blk_bs(bjob->blk);
55
+
56
/* Reopen the image back in read-only mode if necessary */
57
if (s->bs_flags != bdrv_get_flags(bs)) {
58
/* Give up write permissions before making it read-only */
59
@@ -XXX,XX +XXX,XX @@ out:
60
}
61
62
g_free(s->backing_file_str);
63
- job->ret = ret;
64
}
162
}
65
163
66
static int coroutine_fn stream_run(Job *job, Error **errp)
164
/* Context: QEMU global mutex held */
67
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
68
.job_type = JOB_TYPE_STREAM,
69
.free = block_job_free,
70
.run = stream_run,
71
- .exit = stream_exit,
72
+ .prepare = stream_prepare,
73
+ .clean = stream_clean,
74
.user_resume = block_job_user_resume,
75
.drain = block_job_drain,
76
},
77
--
165
--
78
2.17.1
166
2.14.3
79
167
80
168
diff view generated by jsdifflib
1
From: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>

[Drop virtio_blk_data_plane_create() change that misinterprets return
value when the virtio transport does not support dataplane.
--Stefan]

Cc: John Snow <jsnow@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Markus Armbruster <armbru@redhat.com>

Signed-off-by: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: e77848d3735ba590f23ffbf8094379c646c33d79.1511317952.git.maozy.fnst@cn.fujitsu.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---

From: Kevin Wolf <kwolf@redhat.com>

Amongst others, job_finalize_single() calls the .prepare/.commit/.abort
callbacks of the individual job driver. Recently, their use was adapted
for all block jobs so that they involve code calling AIO_WAIT_WHILE()
now. Such code must be called under the AioContext lock for the
respective job, but without holding any other AioContext lock.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
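The resulting locking pattern (a condensed sketch of the job.c hunk
below, not the full function) is to drop the finalized job's own
AioContext lock around the finalization callbacks and to take each other
job's lock only for the duration of the call made on it:

    AioContext *outer_ctx = job->aio_context;

    /* Finalization callbacks may call AIO_WAIT_WHILE(), so the only
     * AioContext lock held may be the one of the job being finalized. */
    aio_context_release(outer_ctx);

    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            AioContext *ctx = other_job->aio_context;
            aio_context_acquire(ctx);
            job_cancel_async(other_job, false);
            aio_context_release(ctx);
        }
    }

    aio_context_acquire(outer_ctx);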
12
job.c | 16 +++++++++++-----
22
hw/block/fdc.c | 17 ++++++-----------
13
1 file changed, 11 insertions(+), 5 deletions(-)
23
hw/block/nvme.c | 7 ++-----
14
24
hw/block/virtio-blk.c | 13 +++++--------
15
diff --git a/job.c b/job.c
25
hw/ide/qdev.c | 12 ++++--------
16
index XXXXXXX..XXXXXXX 100644
26
hw/scsi/scsi-disk.c | 13 ++++---------
17
--- a/job.c
27
hw/usb/dev-storage.c | 9 +++------
18
+++ b/job.c
28
6 files changed, 24 insertions(+), 47 deletions(-)
19
@@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force)
29
20
30
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
21
static void job_completed_txn_abort(Job *job)
31
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/block/fdc.c
33
+++ b/hw/block/fdc.c
34
@@ -XXX,XX +XXX,XX @@ static void fd_revalidate(FDrive *drv)
35
static void fd_change_cb(void *opaque, bool load, Error **errp)
22
{
36
{
23
+ AioContext *outer_ctx = job->aio_context;
37
FDrive *drive = opaque;
24
AioContext *ctx;
38
- Error *local_err = NULL;
25
JobTxn *txn = job->txn;
39
26
Job *other_job;
40
if (!load) {
27
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
41
blk_set_perm(drive->blk, 0, BLK_PERM_ALL, &error_abort);
28
txn->aborting = true;
42
} else {
29
job_txn_ref(txn);
43
- blkconf_apply_backend_options(drive->conf,
30
44
- blk_is_read_only(drive->blk), false,
31
- /* We are the first failed job. Cancel other jobs. */
45
- &local_err);
32
- QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
46
- if (local_err) {
33
- ctx = other_job->aio_context;
47
- error_propagate(errp, local_err);
34
- aio_context_acquire(ctx);
48
+ if (!blkconf_apply_backend_options(drive->conf,
35
- }
49
+ blk_is_read_only(drive->blk), false,
36
+ /* We can only hold the single job's AioContext lock while calling
50
+ errp)) {
37
+ * job_finalize_single() because the finalization callbacks can involve
51
return;
38
+ * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */
39
+ aio_context_release(outer_ctx);
40
41
/* Other jobs are effectively cancelled by us, set the status for
42
* them; this job, however, may or may not be cancelled, depending
43
* on the caller, so leave it. */
44
QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
45
if (other_job != job) {
46
+ ctx = other_job->aio_context;
47
+ aio_context_acquire(ctx);
48
job_cancel_async(other_job, false);
49
+ aio_context_release(ctx);
50
}
52
}
51
}
53
}
52
while (!QLIST_EMPTY(&txn->jobs)) {
54
@@ -XXX,XX +XXX,XX @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp)
53
other_job = QLIST_FIRST(&txn->jobs);
55
FloppyDrive *dev = FLOPPY_DRIVE(qdev);
54
ctx = other_job->aio_context;
56
FloppyBus *bus = FLOPPY_BUS(qdev->parent_bus);
55
+ aio_context_acquire(ctx);
57
FDrive *drive;
56
if (!job_is_completed(other_job)) {
58
- Error *local_err = NULL;
57
assert(job_is_cancelled(other_job));
59
int ret;
58
job_finish_sync(other_job, NULL, NULL);
60
59
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
61
if (dev->unit == -1) {
60
aio_context_release(ctx);
62
@@ -XXX,XX +XXX,XX @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp)
61
}
63
dev->conf.rerror = BLOCKDEV_ON_ERROR_AUTO;
62
64
dev->conf.werror = BLOCKDEV_ON_ERROR_AUTO;
63
+ aio_context_acquire(outer_ctx);
65
66
- blkconf_apply_backend_options(&dev->conf, blk_is_read_only(dev->conf.blk),
67
- false, &local_err);
68
- if (local_err) {
69
- error_propagate(errp, local_err);
70
+ if (!blkconf_apply_backend_options(&dev->conf,
71
+ blk_is_read_only(dev->conf.blk),
72
+ false, errp)) {
73
return;
74
}
75
76
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/hw/block/nvme.c
79
+++ b/hw/block/nvme.c
80
@@ -XXX,XX +XXX,XX @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
81
int i;
82
int64_t bs_size;
83
uint8_t *pci_conf;
84
- Error *local_err = NULL;
85
86
if (!n->conf.blk) {
87
error_setg(errp, "drive property not set");
88
@@ -XXX,XX +XXX,XX @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
89
return;
90
}
91
blkconf_blocksizes(&n->conf);
92
- blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
93
- false, &local_err);
94
- if (local_err) {
95
- error_propagate(errp, local_err);
96
+ if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
97
+ false, errp)) {
98
return;
99
}
100
101
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/hw/block/virtio-blk.c
104
+++ b/hw/block/virtio-blk.c
105
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
106
}
107
108
blkconf_serial(&conf->conf, &conf->serial);
109
- blkconf_apply_backend_options(&conf->conf,
110
- blk_is_read_only(conf->conf.blk), true,
111
- &err);
112
- if (err) {
113
- error_propagate(errp, err);
114
+ if (!blkconf_apply_backend_options(&conf->conf,
115
+ blk_is_read_only(conf->conf.blk), true,
116
+ errp)) {
117
return;
118
}
119
s->original_wce = blk_enable_write_cache(conf->conf.blk);
120
- blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, &err);
121
- if (err) {
122
- error_propagate(errp, err);
123
+ if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
124
return;
125
}
64
+
126
+
65
job_txn_unref(txn);
127
blkconf_blocksizes(&conf->conf);
66
}
128
67
129
virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
130
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/hw/ide/qdev.c
133
+++ b/hw/ide/qdev.c
134
@@ -XXX,XX +XXX,XX @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp)
135
{
136
IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus);
137
IDEState *s = bus->ifs + dev->unit;
138
- Error *err = NULL;
139
int ret;
140
141
if (!dev->conf.blk) {
142
@@ -XXX,XX +XXX,XX @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp)
143
144
blkconf_serial(&dev->conf, &dev->serial);
145
if (kind != IDE_CD) {
146
- blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255, &err);
147
- if (err) {
148
- error_propagate(errp, err);
149
+ if (!blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255,
150
+ errp)) {
151
return;
152
}
153
}
154
- blkconf_apply_backend_options(&dev->conf, kind == IDE_CD, kind != IDE_CD,
155
- &err);
156
- if (err) {
157
- error_propagate(errp, err);
158
+ if (!blkconf_apply_backend_options(&dev->conf, kind == IDE_CD,
159
+ kind != IDE_CD, errp)) {
160
return;
161
}
162
163
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
164
index XXXXXXX..XXXXXXX 100644
165
--- a/hw/scsi/scsi-disk.c
166
+++ b/hw/scsi/scsi-disk.c
167
@@ -XXX,XX +XXX,XX @@ static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
168
static void scsi_realize(SCSIDevice *dev, Error **errp)
169
{
170
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
171
- Error *err = NULL;
172
173
if (!s->qdev.conf.blk) {
174
error_setg(errp, "drive property not set");
175
@@ -XXX,XX +XXX,XX @@ static void scsi_realize(SCSIDevice *dev, Error **errp)
176
}
177
178
if (dev->type == TYPE_DISK) {
179
- blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
180
- if (err) {
181
- error_propagate(errp, err);
182
+ if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
183
return;
184
}
185
}
186
- blkconf_apply_backend_options(&dev->conf,
187
- blk_is_read_only(s->qdev.conf.blk),
188
- dev->type == TYPE_DISK, &err);
189
- if (err) {
190
- error_propagate(errp, err);
191
+ if (!blkconf_apply_backend_options(&dev->conf,
192
+ blk_is_read_only(s->qdev.conf.blk),
193
+ dev->type == TYPE_DISK, errp)) {
194
return;
195
}
196
197
diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c
198
index XXXXXXX..XXXXXXX 100644
199
--- a/hw/usb/dev-storage.c
200
+++ b/hw/usb/dev-storage.c
201
@@ -XXX,XX +XXX,XX @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
202
MSDState *s = USB_STORAGE_DEV(dev);
203
BlockBackend *blk = s->conf.blk;
204
SCSIDevice *scsi_dev;
205
- Error *err = NULL;
206
207
if (!blk) {
208
error_setg(errp, "drive property not set");
209
@@ -XXX,XX +XXX,XX @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
210
211
blkconf_serial(&s->conf, &dev->serial);
212
blkconf_blocksizes(&s->conf);
213
- blkconf_apply_backend_options(&s->conf, blk_is_read_only(blk), true, &err);
214
- if (err) {
215
- error_propagate(errp, err);
216
+ if (!blkconf_apply_backend_options(&s->conf, blk_is_read_only(blk), true,
217
+ errp)) {
218
return;
219
}
220
221
@@ -XXX,XX +XXX,XX @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
222
&usb_msd_scsi_info_storage, NULL);
223
scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable,
224
s->conf.bootindex, dev->serial,
225
- &err);
226
+ errp);
227
blk_unref(blk);
228
if (!scsi_dev) {
229
- error_propagate(errp, err);
230
return;
231
}
232
usb_msd_handle_reset(dev);
68
--
233
--
69
2.17.1
234
2.14.3
70
235
71
236
diff view generated by jsdifflib
1
From: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>

The function names usb_msd_{realize,unrealize}_* and
usb_msd_class_initfn_* are unusual. Rename them to
usb_msd_*_{realize,unrealize} and usb_msd_class_*_initfn.

Cc: Gerd Hoffmann <kraxel@redhat.com>

Signed-off-by: Mao Zhongyi <maozy.fnst@cn.fujitsu.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 11e6003433abce35f3f4970e1acc71ee92dbcf51.1511317952.git.maozy.fnst@cn.fujitsu.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/usb/dev-storage.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c

From: John Snow <jsnow@redhat.com>

Use the component callbacks: prepare, abort, and clean.

NB: prepare is only called when the job has not yet failed,
and abort can be called after prepare.

complete -> prepare -> abort -> clean
complete -> abort -> clean

During the refactor, a potential problem with bdrv_drop_intermediate
was identified; the patched behavior is no worse than the pre-patch
behavior, so leave a FIXME for now to be fixed in a future patch.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180906130225.5118-5-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
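Concretely (a sketch of the driver-table change from the block/commit.c
hunks below), the single .exit callback is split into the transactional
callbacks, which the job core then invokes according to the two
sequences listed above:

    /* inside commit_job_driver's embedded job driver struct: */
    .run     = commit_run,
    /* .exit = commit_exit,  (removed) */
    .prepare = commit_prepare,   /* only reached if the job has not failed */
    .abort   = commit_abort,     /* on failure, possibly after prepare */
    .clean   = commit_clean,     /* always runs last */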
21
block/commit.c | 92 ++++++++++++++++++++++++++++----------------------
22
1 file changed, 51 insertions(+), 41 deletions(-)
23
24
diff --git a/block/commit.c b/block/commit.c
25
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
26
--- a/block/commit.c
19
--- a/hw/usb/dev-storage.c
27
+++ b/block/commit.c
20
+++ b/hw/usb/dev-storage.c
28
@@ -XXX,XX +XXX,XX @@ typedef struct CommitBlockJob {
21
@@ -XXX,XX +XXX,XX @@ static void usb_msd_unrealize_storage(USBDevice *dev, Error **errp)
29
BlockDriverState *commit_top_bs;
22
object_unref(OBJECT(&s->bus));
30
BlockBackend *top;
31
BlockBackend *base;
32
+ BlockDriverState *base_bs;
33
BlockdevOnError on_error;
34
int base_flags;
35
char *backing_file_str;
36
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
37
return 0;
38
}
23
}
39
24
40
-static void commit_exit(Job *job)
25
-static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
41
+static int commit_prepare(Job *job)
26
+static void usb_msd_storage_realize(USBDevice *dev, Error **errp)
42
{
27
{
43
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
28
MSDState *s = USB_STORAGE_DEV(dev);
44
- BlockJob *bjob = &s->common;
29
BlockBackend *blk = s->conf.blk;
45
- BlockDriverState *top = blk_bs(s->top);
30
@@ -XXX,XX +XXX,XX @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
46
- BlockDriverState *base = blk_bs(s->base);
31
s->scsi_dev = scsi_dev;
47
- BlockDriverState *commit_top_bs = s->commit_top_bs;
48
- bool remove_commit_top_bs = false;
49
-
50
- /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
51
- bdrv_ref(top);
52
- bdrv_ref(commit_top_bs);
53
54
/* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
55
* the normal backing chain can be restored. */
56
blk_unref(s->base);
57
+ s->base = NULL;
58
+
59
+ /* FIXME: bdrv_drop_intermediate treats total failures and partial failures
60
+ * identically. Further work is needed to disambiguate these cases. */
61
+ return bdrv_drop_intermediate(s->commit_top_bs, s->base_bs,
62
+ s->backing_file_str);
63
+}
64
65
- if (!job_is_cancelled(job) && job->ret == 0) {
66
- /* success */
67
- job->ret = bdrv_drop_intermediate(s->commit_top_bs, base,
68
- s->backing_file_str);
69
- } else {
70
- /* XXX Can (or should) we somehow keep 'consistent read' blocked even
71
- * after the failed/cancelled commit job is gone? If we already wrote
72
- * something to base, the intermediate images aren't valid any more. */
73
- remove_commit_top_bs = true;
74
+static void commit_abort(Job *job)
75
+{
76
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
77
+ BlockDriverState *top_bs = blk_bs(s->top);
78
+
79
+ /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
80
+ bdrv_ref(top_bs);
81
+ bdrv_ref(s->commit_top_bs);
82
+
83
+ if (s->base) {
84
+ blk_unref(s->base);
85
}
86
87
+ /* free the blockers on the intermediate nodes so that bdrv_replace_nodes
88
+ * can succeed */
89
+ block_job_remove_all_bdrv(&s->common);
90
+
91
+ /* If bdrv_drop_intermediate() failed (or was not invoked), remove the
92
+ * commit filter driver from the backing chain now. Do this as the final
93
+ * step so that the 'consistent read' permission can be granted.
94
+ *
95
+ * XXX Can (or should) we somehow keep 'consistent read' blocked even
96
+ * after the failed/cancelled commit job is gone? If we already wrote
97
+ * something to base, the intermediate images aren't valid any more. */
98
+ bdrv_child_try_set_perm(s->commit_top_bs->backing, 0, BLK_PERM_ALL,
99
+ &error_abort);
100
+ bdrv_replace_node(s->commit_top_bs, backing_bs(s->commit_top_bs),
101
+ &error_abort);
102
+
103
+ bdrv_unref(s->commit_top_bs);
104
+ bdrv_unref(top_bs);
105
+}
106
+
107
+static void commit_clean(Job *job)
108
+{
109
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
110
+
111
/* restore base open flags here if appropriate (e.g., change the base back
112
* to r/o). These reopens do not need to be atomic, since we won't abort
113
* even on failure here */
114
- if (s->base_flags != bdrv_get_flags(base)) {
115
- bdrv_reopen(base, s->base_flags, NULL);
116
+ if (s->base_flags != bdrv_get_flags(s->base_bs)) {
117
+ bdrv_reopen(s->base_bs, s->base_flags, NULL);
118
}
119
+
120
g_free(s->backing_file_str);
121
blk_unref(s->top);
122
-
123
- /* If there is more than one reference to the job (e.g. if called from
124
- * job_finish_sync()), job_completed() won't free it and therefore the
125
- * blockers on the intermediate nodes remain. This would cause
126
- * bdrv_set_backing_hd() to fail. */
127
- block_job_remove_all_bdrv(bjob);
128
-
129
- /* If bdrv_drop_intermediate() didn't already do that, remove the commit
130
- * filter driver from the backing chain. Do this as the final step so that
131
- * the 'consistent read' permission can be granted. */
132
- if (remove_commit_top_bs) {
133
- bdrv_child_try_set_perm(commit_top_bs->backing, 0, BLK_PERM_ALL,
134
- &error_abort);
135
- bdrv_replace_node(commit_top_bs, backing_bs(commit_top_bs),
136
- &error_abort);
137
- }
138
-
139
- bdrv_unref(commit_top_bs);
140
- bdrv_unref(top);
141
}
32
}
142
33
143
static int coroutine_fn commit_run(Job *job, Error **errp)
34
-static void usb_msd_unrealize_bot(USBDevice *dev, Error **errp)
144
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_job_driver = {
35
+static void usb_msd_bot_unrealize(USBDevice *dev, Error **errp)
145
.user_resume = block_job_user_resume,
36
{
146
.drain = block_job_drain,
37
MSDState *s = USB_STORAGE_DEV(dev);
147
.run = commit_run,
38
148
- .exit = commit_exit,
39
object_unref(OBJECT(&s->bus));
149
+ .prepare = commit_prepare,
40
}
150
+ .abort = commit_abort,
41
151
+ .clean = commit_clean
42
-static void usb_msd_realize_bot(USBDevice *dev, Error **errp)
152
},
43
+static void usb_msd_bot_realize(USBDevice *dev, Error **errp)
44
{
45
MSDState *s = USB_STORAGE_DEV(dev);
46
DeviceState *d = DEVICE(dev);
47
@@ -XXX,XX +XXX,XX @@ static void usb_msd_class_initfn_common(ObjectClass *klass, void *data)
48
dc->vmsd = &vmstate_usb_msd;
49
}
50
51
-static void usb_msd_class_initfn_storage(ObjectClass *klass, void *data)
52
+static void usb_msd_class_storage_initfn(ObjectClass *klass, void *data)
53
{
54
DeviceClass *dc = DEVICE_CLASS(klass);
55
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
56
57
- uc->realize = usb_msd_realize_storage;
58
+ uc->realize = usb_msd_storage_realize;
59
uc->unrealize = usb_msd_unrealize_storage;
60
dc->props = msd_properties;
61
}
62
@@ -XXX,XX +XXX,XX @@ static void usb_msd_instance_init(Object *obj)
63
object_property_set_int(obj, -1, "bootindex", NULL);
64
}
65
66
-static void usb_msd_class_initfn_bot(ObjectClass *klass, void *data)
67
+static void usb_msd_class_bot_initfn(ObjectClass *klass, void *data)
68
{
69
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
70
71
- uc->realize = usb_msd_realize_bot;
72
- uc->unrealize = usb_msd_unrealize_bot;
73
+ uc->realize = usb_msd_bot_realize;
74
+ uc->unrealize = usb_msd_bot_unrealize;
75
uc->attached_settable = true;
76
}
77
78
static const TypeInfo msd_info = {
79
.name = "usb-storage",
80
.parent = TYPE_USB_STORAGE,
81
- .class_init = usb_msd_class_initfn_storage,
82
+ .class_init = usb_msd_class_storage_initfn,
83
.instance_init = usb_msd_instance_init,
153
};
84
};
154
85
155
@@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs,
86
static const TypeInfo bot_info = {
156
if (ret < 0) {
87
.name = "usb-bot",
157
goto fail;
88
.parent = TYPE_USB_STORAGE,
158
}
89
- .class_init = usb_msd_class_initfn_bot,
159
+ s->base_bs = base;
90
+ .class_init = usb_msd_class_bot_initfn,
160
91
};
161
/* Required permissions are already taken with block_job_add_bdrv() */
92
162
s->top = blk_new(0, BLK_PERM_ALL);
93
static void usb_msd_register_types(void)
163
--
94
--
164
2.17.1
95
2.14.3
165
96
166
97
diff view generated by jsdifflib
1
Commit 1351d1ec89eabebc9fdff20451a62c413d7accc1 ("qdev: drop iothread
property type") forgot to remove this include.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20171205133954.31006-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/core/qdev-properties-system.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c

From: Kevin Wolf <kwolf@redhat.com>

For the block job drain test, don't only test draining the source and
the target node, but create a backing chain for the source
(source_backing <- source <- source_overlay) and test draining each of
the nodes in it.

When using iothreads, the source node (and therefore the job) is in a
different AioContext than the drain, which happens from the main
thread. This way, the main thread waits in AIO_WAIT_WHILE() for the
iothread to make progress and aio_wait_kick() is required to notify it.
The test validates that calling bdrv_wakeup() for a child or a parent
node will actually notify AIO_WAIT_WHILE() instead of letting it hang.

Increase the sleep time a bit (to 1 ms) because the test case is racy
and with the shorter sleep, it didn't reproduce the bug it is supposed
to test for me under 'rr record -n'.

This was because bdrv_drain_invoke_entry() (in the main thread) was only
called after the job had already reached the pause point, so we got a
bdrv_dec_in_flight() from the main thread and the additional
aio_wait_kick() when the job becomes idle (that we really wanted to test
here) wasn't even necessary any more to make progress.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
---
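For orientation, the chain the test builds (a sketch condensed from the
tests/test-bdrv-drain.c hunks below) looks like this; the drain is then
issued on whichever node the drain_node parameter selects:

    /* source_backing <- source <- source_overlay */
    src         = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR, &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing", BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay", BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);                 /* now kept alive via src_overlay's backing link */
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);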
29
tests/test-bdrv-drain.c | 77 ++++++++++++++++++++++++++++++++++++-----
30
1 file changed, 69 insertions(+), 8 deletions(-)
31
32
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
33
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
34
--- a/tests/test-bdrv-drain.c
13
--- a/hw/core/qdev-properties-system.c
35
+++ b/tests/test-bdrv-drain.c
14
+++ b/hw/core/qdev-properties-system.c
36
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
15
@@ -XXX,XX +XXX,XX @@
37
BlockJob common;
16
#include "qapi/visitor.h"
38
int run_ret;
17
#include "chardev/char-fe.h"
39
int prepare_ret;
18
#include "sysemu/tpm_backend.h"
40
+ bool running;
19
-#include "sysemu/iothread.h"
41
bool should_complete;
20
42
} TestBlockJob;
21
static void get_pointer(Object *obj, Visitor *v, Property *prop,
43
22
char *(*print)(void *ptr),
44
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
45
{
46
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
47
48
+ /* We are running the actual job code past the pause point in
49
+ * job_co_entry(). */
50
+ s->running = true;
51
+
52
job_transition_to_ready(&s->common.job);
53
while (!s->should_complete) {
54
/* Avoid job_sleep_ns() because it marks the job as !busy. We want to
55
* emulate some actual activity (probably some I/O) here so that drain
56
* has to wait for this activity to stop. */
57
- qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
58
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);
59
+
60
job_pause_point(&s->common.job);
61
}
62
63
@@ -XXX,XX +XXX,XX @@ enum test_job_result {
64
TEST_JOB_FAIL_PREPARE,
65
};
66
67
-static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
68
- enum test_job_result result)
69
+enum test_job_drain_node {
70
+ TEST_JOB_DRAIN_SRC,
71
+ TEST_JOB_DRAIN_SRC_CHILD,
72
+ TEST_JOB_DRAIN_SRC_PARENT,
73
+};
74
+
75
+static void test_blockjob_common_drain_node(enum drain_type drain_type,
76
+ bool use_iothread,
77
+ enum test_job_result result,
78
+ enum test_job_drain_node drain_node)
79
{
80
BlockBackend *blk_src, *blk_target;
81
- BlockDriverState *src, *target;
82
+ BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
83
BlockJob *job;
84
TestBlockJob *tjob;
85
IOThread *iothread = NULL;
86
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
87
88
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
89
&error_abort);
90
+ src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
91
+ BDRV_O_RDWR, &error_abort);
92
+ src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
93
+ BDRV_O_RDWR, &error_abort);
94
+
95
+ bdrv_set_backing_hd(src_overlay, src, &error_abort);
96
+ bdrv_unref(src);
97
+ bdrv_set_backing_hd(src, src_backing, &error_abort);
98
+ bdrv_unref(src_backing);
99
+
100
blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
101
- blk_insert_bs(blk_src, src, &error_abort);
102
+ blk_insert_bs(blk_src, src_overlay, &error_abort);
103
+
104
+ switch (drain_node) {
105
+ case TEST_JOB_DRAIN_SRC:
106
+ drain_bs = src;
107
+ break;
108
+ case TEST_JOB_DRAIN_SRC_CHILD:
109
+ drain_bs = src_backing;
110
+ break;
111
+ case TEST_JOB_DRAIN_SRC_PARENT:
112
+ drain_bs = src_overlay;
113
+ break;
114
+ default:
115
+ g_assert_not_reached();
116
+ }
117
118
if (use_iothread) {
119
iothread = iothread_new();
120
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
121
job_start(&job->job);
122
aio_context_release(ctx);
123
124
+ if (use_iothread) {
125
+ /* job_co_entry() is run in the I/O thread, wait for the actual job
126
+ * code to start (we don't want to catch the job in the pause point in
127
+ * job_co_entry(). */
128
+ while (!tjob->running) {
129
+ aio_poll(qemu_get_aio_context(), false);
130
+ }
131
+ }
132
+
133
g_assert_cmpint(job->job.pause_count, ==, 0);
134
g_assert_false(job->job.paused);
135
+ g_assert_true(tjob->running);
136
g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
137
138
- do_drain_begin_unlocked(drain_type, src);
139
+ do_drain_begin_unlocked(drain_type, drain_bs);
140
141
if (drain_type == BDRV_DRAIN_ALL) {
142
/* bdrv_drain_all() drains both src and target */
143
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
144
g_assert_true(job->job.paused);
145
g_assert_false(job->job.busy); /* The job is paused */
146
147
- do_drain_end_unlocked(drain_type, src);
148
+ do_drain_end_unlocked(drain_type, drain_bs);
149
150
if (use_iothread) {
151
/* paused is reset in the I/O thread, wait for it */
152
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
153
154
blk_unref(blk_src);
155
blk_unref(blk_target);
156
- bdrv_unref(src);
157
+ bdrv_unref(src_overlay);
158
bdrv_unref(target);
159
160
if (iothread) {
161
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
162
}
163
}
164
165
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
166
+ enum test_job_result result)
167
+{
168
+ test_blockjob_common_drain_node(drain_type, use_iothread, result,
169
+ TEST_JOB_DRAIN_SRC);
170
+ test_blockjob_common_drain_node(drain_type, use_iothread, result,
171
+ TEST_JOB_DRAIN_SRC_CHILD);
172
+ if (drain_type == BDRV_SUBTREE_DRAIN) {
173
+ test_blockjob_common_drain_node(drain_type, use_iothread, result,
174
+ TEST_JOB_DRAIN_SRC_PARENT);
175
+ }
176
+}
177
+
178
static void test_blockjob_drain_all(void)
179
{
180
test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
181
--
23
--
182
2.17.1
24
2.14.3
183
25
184
26
1
From: John Snow <jsnow@redhat.com>
1
bdrv_unref() requires the AioContext lock because bdrv_flush() uses
2
BDRV_POLL_WHILE(), which assumes the AioContext is currently held. If
3
BDRV_POLL_WHILE() runs without the AioContext held, the
4
pthread_mutex_unlock() call in aio_context_release() fails.
2
5
3
Signed-off-by: John Snow <jsnow@redhat.com>
6
This patch moves bdrv_unref() into the AioContext locked region to solve
4
Reviewed-by: Max Reitz <mreitz@redhat.com>
7
the following pthread_mutex_unlock() failure:
5
Message-id: 20180906130225.5118-14-jsnow@redhat.com
8
6
Reviewed-by: Jeff Cody <jcody@redhat.com>
9
#0 0x00007f566181969b in raise () at /lib64/libc.so.6
7
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
#1 0x00007f566181b3b1 in abort () at /lib64/libc.so.6
11
#2 0x00005592cd590458 in error_exit (err=<optimized out>, msg=msg@entry=0x5592cdaf6d60 <__func__.23977> "qemu_mutex_unlock") at util/qemu-thread-posix.c:36
12
#3 0x00005592cd96e738 in qemu_mutex_unlock (mutex=mutex@entry=0x5592ce9505e0) at util/qemu-thread-posix.c:96
13
#4 0x00005592cd969b69 in aio_context_release (ctx=ctx@entry=0x5592ce950580) at util/async.c:507
14
#5 0x00005592cd8ead78 in bdrv_flush (bs=bs@entry=0x5592cfa87210) at block/io.c:2478
15
#6 0x00005592cd89df30 in bdrv_close (bs=0x5592cfa87210) at block.c:3207
16
#7 0x00005592cd89df30 in bdrv_delete (bs=0x5592cfa87210) at block.c:3395
17
#8 0x00005592cd89df30 in bdrv_unref (bs=0x5592cfa87210) at block.c:4418
18
#9 0x00005592cd6b7f86 in qmp_transaction (dev_list=<optimized out>, has_props=<optimized out>, props=<optimized out>, errp=errp@entry=0x7ffe4a1fc9d8) at blockdev.c:2308
19
20
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
21
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
22
Reviewed-by: Eric Blake <eblake@redhat.com>
23
Message-id: 20171206144550.22295-2-stefanha@redhat.com
24
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
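Roughly, the resulting ordering in the transaction's .clean() callback is
the following (a condensed sketch of the hunk below, not new code):
bdrv_unref() now runs while the AioContext is still held, so the
BDRV_POLL_WHILE() inside bdrv_flush() can release and re-acquire it safely.

    static void external_snapshot_clean(BlkActionState *common)
    {
        ExternalSnapshotState *state =
            DO_UPCAST(ExternalSnapshotState, common, common);

        if (state->aio_context) {
            bdrv_drained_end(state->old_bs);
            /* may call bdrv_flush() -> BDRV_POLL_WHILE(); needs the lock */
            bdrv_unref(state->new_bs);
            aio_context_release(state->aio_context);
        }
    }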
8
---
25
---
9
qapi/block-core.json | 30 ++++++++++++++++++++++++++++--
26
blockdev.c | 2 +-
10
blockdev.c | 14 ++++++++++++++
27
1 file changed, 1 insertion(+), 1 deletion(-)
11
2 files changed, 42 insertions(+), 2 deletions(-)
12
28
13
diff --git a/qapi/block-core.json b/qapi/block-core.json
14
index XXXXXXX..XXXXXXX 100644
15
--- a/qapi/block-core.json
16
+++ b/qapi/block-core.json
17
@@ -XXX,XX +XXX,XX @@
18
# @copy-mode: when to copy data to the destination; defaults to 'background'
19
# (Since: 3.0)
20
#
21
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
22
+# finished its work, waiting for @block-job-finalize before
23
+# making any block graph changes.
24
+# When true, this job will automatically
25
+# perform its abort or commit actions.
26
+# Defaults to true. (Since 3.1)
27
+#
28
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
29
+# has completely ceased all work, and awaits @block-job-dismiss.
30
+# When true, this job will automatically disappear from the query
31
+# list without user intervention.
32
+# Defaults to true. (Since 3.1)
33
# Since: 1.3
34
##
35
{ 'struct': 'DriveMirror',
36
@@ -XXX,XX +XXX,XX @@
37
'*speed': 'int', '*granularity': 'uint32',
38
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
39
'*on-target-error': 'BlockdevOnError',
40
- '*unmap': 'bool', '*copy-mode': 'MirrorCopyMode' } }
41
+ '*unmap': 'bool', '*copy-mode': 'MirrorCopyMode',
42
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
43
44
##
45
# @BlockDirtyBitmap:
46
@@ -XXX,XX +XXX,XX @@
47
# @copy-mode: when to copy data to the destination; defaults to 'background'
48
# (Since: 3.0)
49
#
50
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
51
+# finished its work, waiting for @block-job-finalize before
52
+# making any block graph changes.
53
+# When true, this job will automatically
54
+# perform its abort or commit actions.
55
+# Defaults to true. (Since 3.1)
56
+#
57
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
58
+# has completely ceased all work, and awaits @block-job-dismiss.
59
+# When true, this job will automatically disappear from the query
60
+# list without user intervention.
61
+# Defaults to true. (Since 3.1)
62
# Returns: nothing on success.
63
#
64
# Since: 2.6
65
@@ -XXX,XX +XXX,XX @@
66
'*buf-size': 'int', '*on-source-error': 'BlockdevOnError',
67
'*on-target-error': 'BlockdevOnError',
68
'*filter-node-name': 'str',
69
- '*copy-mode': 'MirrorCopyMode' } }
70
+ '*copy-mode': 'MirrorCopyMode',
71
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
72
73
##
74
# @block_set_io_throttle:
75
diff --git a/blockdev.c b/blockdev.c
29
diff --git a/blockdev.c b/blockdev.c
76
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
77
--- a/blockdev.c
31
--- a/blockdev.c
78
+++ b/blockdev.c
32
+++ b/blockdev.c
79
@@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
33
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_clean(BlkActionState *common)
80
bool has_filter_node_name,
34
DO_UPCAST(ExternalSnapshotState, common, common);
81
const char *filter_node_name,
35
if (state->aio_context) {
82
bool has_copy_mode, MirrorCopyMode copy_mode,
36
bdrv_drained_end(state->old_bs);
83
+ bool has_auto_finalize, bool auto_finalize,
37
- aio_context_release(state->aio_context);
84
+ bool has_auto_dismiss, bool auto_dismiss,
38
bdrv_unref(state->new_bs);
85
Error **errp)
39
+ aio_context_release(state->aio_context);
86
{
87
int job_flags = JOB_DEFAULT;
88
@@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
89
if (!has_copy_mode) {
90
copy_mode = MIRROR_COPY_MODE_BACKGROUND;
91
}
40
}
92
+ if (has_auto_finalize && !auto_finalize) {
41
}
93
+ job_flags |= JOB_MANUAL_FINALIZE;
94
+ }
95
+ if (has_auto_dismiss && !auto_dismiss) {
96
+ job_flags |= JOB_MANUAL_DISMISS;
97
+ }
98
99
if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) {
100
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity",
101
@@ -XXX,XX +XXX,XX @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
102
arg->has_unmap, arg->unmap,
103
false, NULL,
104
arg->has_copy_mode, arg->copy_mode,
105
+ arg->has_auto_finalize, arg->auto_finalize,
106
+ arg->has_auto_dismiss, arg->auto_dismiss,
107
&local_err);
108
bdrv_unref(target_bs);
109
error_propagate(errp, local_err);
110
@@ -XXX,XX +XXX,XX @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
111
bool has_filter_node_name,
112
const char *filter_node_name,
113
bool has_copy_mode, MirrorCopyMode copy_mode,
114
+ bool has_auto_finalize, bool auto_finalize,
115
+ bool has_auto_dismiss, bool auto_dismiss,
116
Error **errp)
117
{
118
BlockDriverState *bs;
119
@@ -XXX,XX +XXX,XX @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id,
120
true, true,
121
has_filter_node_name, filter_node_name,
122
has_copy_mode, copy_mode,
123
+ has_auto_finalize, auto_finalize,
124
+ has_auto_dismiss, auto_dismiss,
125
&local_err);
126
error_propagate(errp, local_err);
127
42
128
--
43
--
129
2.17.1
44
2.14.3
130
45
131
46
diff view generated by jsdifflib
1
From: John Snow <jsnow@redhat.com>
1
It is not necessary to hold AioContext across transactions anymore since
2
2
bdrv_drained_begin/end() is used to keep the nodes quiesced. In fact,
3
Add support for taking and passing forward job creation flags.
3
using the AioContext lock for this purpose was always buggy.
4
4
5
Signed-off-by: John Snow <jsnow@redhat.com>
5
This patch reduces the scope of AioContext locked regions. This is not
6
Reviewed-by: Max Reitz <mreitz@redhat.com>
6
just a cleanup but also fixes hangs that occur in BDRV_POLL_WHILE()
7
Reviewed-by: Jeff Cody <jcody@redhat.com>
7
because it is unaware of recursive locking and does not release the
8
Message-id: 20180906130225.5118-2-jsnow@redhat.com
8
AioContext the necessary number of times to allow progress to be made.
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
9
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
12
Reviewed-by: Eric Blake <eblake@redhat.com>
13
Message-id: 20171206144550.22295-3-stefanha@redhat.com
14
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
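The shape of the change is the same in every transaction callback; here is a
condensed sketch of the .clean() side (ExampleState is a stand-in for the
per-action state structs, not a real type):

    static void example_clean(BlkActionState *common)
    {
        ExampleState *state = DO_UPCAST(ExampleState, common, common);
        AioContext *aio_context;

        if (!state->bs) {
            return;    /* .prepare() bailed out before draining */
        }

        /* Look the AioContext up and hold it only for this callback,
         * instead of caching it in the state struct from .prepare()
         * until .clean(). */
        aio_context = bdrv_get_aio_context(state->bs);
        aio_context_acquire(aio_context);

        bdrv_drained_end(state->bs);    /* paired with .prepare() */

        aio_context_release(aio_context);
    }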
10
---
15
---
11
include/block/block_int.h | 5 ++++-
16
blockdev.c | 71 ++++++++++++++++++++++++++++++++++++++++++--------------------
12
block/commit.c | 5 +++--
17
1 file changed, 48 insertions(+), 23 deletions(-)
13
blockdev.c | 7 ++++---
18
14
3 files changed, 11 insertions(+), 6 deletions(-)
15
16
diff --git a/include/block/block_int.h b/include/block/block_int.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/block/block_int.h
19
+++ b/include/block/block_int.h
20
@@ -XXX,XX +XXX,XX @@ void stream_start(const char *job_id, BlockDriverState *bs,
21
* @bs: Active block device.
22
* @top: Top block device to be committed.
23
* @base: Block device that will be written into, and become the new top.
24
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
25
+ * See @BlockJobCreateFlags
26
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
27
* @on_error: The action to take upon error.
28
* @backing_file_str: String to use as the backing file in @top's overlay
29
@@ -XXX,XX +XXX,XX @@ void stream_start(const char *job_id, BlockDriverState *bs,
30
*
31
*/
32
void commit_start(const char *job_id, BlockDriverState *bs,
33
- BlockDriverState *base, BlockDriverState *top, int64_t speed,
34
+ BlockDriverState *base, BlockDriverState *top,
35
+ int creation_flags, int64_t speed,
36
BlockdevOnError on_error, const char *backing_file_str,
37
const char *filter_node_name, Error **errp);
38
/**
39
diff --git a/block/commit.c b/block/commit.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/block/commit.c
42
+++ b/block/commit.c
43
@@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_commit_top = {
44
};
45
46
void commit_start(const char *job_id, BlockDriverState *bs,
47
- BlockDriverState *base, BlockDriverState *top, int64_t speed,
48
+ BlockDriverState *base, BlockDriverState *top,
49
+ int creation_flags, int64_t speed,
50
BlockdevOnError on_error, const char *backing_file_str,
51
const char *filter_node_name, Error **errp)
52
{
53
@@ -XXX,XX +XXX,XX @@ void commit_start(const char *job_id, BlockDriverState *bs,
54
}
55
56
s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL,
57
- speed, JOB_DEFAULT, NULL, NULL, errp);
58
+ speed, creation_flags, NULL, NULL, errp);
59
if (!s) {
60
return;
61
}
62
diff --git a/blockdev.c b/blockdev.c
19
diff --git a/blockdev.c b/blockdev.c
63
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
64
--- a/blockdev.c
21
--- a/blockdev.c
65
+++ b/blockdev.c
22
+++ b/blockdev.c
66
@@ -XXX,XX +XXX,XX @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
23
@@ -XXX,XX +XXX,XX @@ typedef struct ExternalSnapshotState {
67
* BlockdevOnError change for blkmirror makes it in
24
BlkActionState common;
68
*/
25
BlockDriverState *old_bs;
69
BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
26
BlockDriverState *new_bs;
70
+ int job_flags = JOB_DEFAULT;
27
- AioContext *aio_context;
71
28
bool overlay_appended;
72
if (!has_speed) {
29
} ExternalSnapshotState;
73
speed = 0;
30
74
@@ -XXX,XX +XXX,XX @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
31
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
75
goto out;
32
ExternalSnapshotState *state =
76
}
33
DO_UPCAST(ExternalSnapshotState, common, common);
77
commit_active_start(has_job_id ? job_id : NULL, bs, base_bs,
34
TransactionAction *action = common->action;
78
- JOB_DEFAULT, speed, on_error,
35
+ AioContext *aio_context;
79
+ job_flags, speed, on_error,
36
80
filter_node_name, NULL, NULL, false, &local_err);
37
/* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
81
} else {
38
* purpose but a different set of parameters */
82
BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs);
39
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
83
if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) {
40
return;
84
goto out;
41
}
85
}
42
86
- commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, speed,
43
- /* Acquire AioContext now so any threads operating on old_bs stop */
87
- on_error, has_backing_file ? backing_file : NULL,
44
- state->aio_context = bdrv_get_aio_context(state->old_bs);
88
+ commit_start(has_job_id ? job_id : NULL, bs, base_bs, top_bs, job_flags,
45
- aio_context_acquire(state->aio_context);
89
+ speed, on_error, has_backing_file ? backing_file : NULL,
46
+ aio_context = bdrv_get_aio_context(state->old_bs);
90
filter_node_name, &local_err);
47
+ aio_context_acquire(aio_context);
91
}
48
+
92
if (local_err != NULL) {
49
+ /* Paired with .clean() */
50
bdrv_drained_begin(state->old_bs);
51
52
if (!bdrv_is_inserted(state->old_bs)) {
53
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
54
- return;
55
+ goto out;
56
}
57
58
if (bdrv_op_is_blocked(state->old_bs,
59
BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
60
- return;
61
+ goto out;
62
}
63
64
if (!bdrv_is_read_only(state->old_bs)) {
65
if (bdrv_flush(state->old_bs)) {
66
error_setg(errp, QERR_IO_ERROR);
67
- return;
68
+ goto out;
69
}
70
}
71
72
if (!bdrv_is_first_non_filter(state->old_bs)) {
73
error_setg(errp, QERR_FEATURE_DISABLED, "snapshot");
74
- return;
75
+ goto out;
76
}
77
78
if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
79
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
80
81
if (node_name && !snapshot_node_name) {
82
error_setg(errp, "New snapshot node name missing");
83
- return;
84
+ goto out;
85
}
86
87
if (snapshot_node_name &&
88
bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
89
error_setg(errp, "New snapshot node name already in use");
90
- return;
91
+ goto out;
92
}
93
94
flags = state->old_bs->open_flags;
95
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
96
int64_t size = bdrv_getlength(state->old_bs);
97
if (size < 0) {
98
error_setg_errno(errp, -size, "bdrv_getlength failed");
99
- return;
100
+ goto out;
101
}
102
bdrv_img_create(new_image_file, format,
103
state->old_bs->filename,
104
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
105
NULL, size, flags, false, &local_err);
106
if (local_err) {
107
error_propagate(errp, local_err);
108
- return;
109
+ goto out;
110
}
111
}
112
113
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
114
errp);
115
/* We will manually add the backing_hd field to the bs later */
116
if (!state->new_bs) {
117
- return;
118
+ goto out;
119
}
120
121
if (bdrv_has_blk(state->new_bs)) {
122
error_setg(errp, "The snapshot is already in use");
123
- return;
124
+ goto out;
125
}
126
127
if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
128
errp)) {
129
- return;
130
+ goto out;
131
}
132
133
if (state->new_bs->backing != NULL) {
134
error_setg(errp, "The snapshot already has a backing image");
135
- return;
136
+ goto out;
137
}
138
139
if (!state->new_bs->drv->supports_backing) {
140
error_setg(errp, "The snapshot does not support backing images");
141
- return;
142
+ goto out;
143
}
144
145
- bdrv_set_aio_context(state->new_bs, state->aio_context);
146
+ bdrv_set_aio_context(state->new_bs, aio_context);
147
148
/* This removes our old bs and adds the new bs. This is an operation that
149
* can fail, so we need to do it in .prepare; undoing it for abort is
150
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_prepare(BlkActionState *common,
151
bdrv_append(state->new_bs, state->old_bs, &local_err);
152
if (local_err) {
153
error_propagate(errp, local_err);
154
- return;
155
+ goto out;
156
}
157
state->overlay_appended = true;
158
+
159
+out:
160
+ aio_context_release(aio_context);
161
}
162
163
static void external_snapshot_commit(BlkActionState *common)
164
{
165
ExternalSnapshotState *state =
166
DO_UPCAST(ExternalSnapshotState, common, common);
167
+ AioContext *aio_context;
168
+
169
+ aio_context = bdrv_get_aio_context(state->old_bs);
170
+ aio_context_acquire(aio_context);
171
172
/* We don't need (or want) to use the transactional
173
* bdrv_reopen_multiple() across all the entries at once, because we
174
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_commit(BlkActionState *common)
175
bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR,
176
NULL);
177
}
178
+
179
+ aio_context_release(aio_context);
180
}
181
182
static void external_snapshot_abort(BlkActionState *common)
183
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_abort(BlkActionState *common)
184
DO_UPCAST(ExternalSnapshotState, common, common);
185
if (state->new_bs) {
186
if (state->overlay_appended) {
187
+ AioContext *aio_context;
188
+
189
+ aio_context = bdrv_get_aio_context(state->old_bs);
190
+ aio_context_acquire(aio_context);
191
+
192
bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd()
193
close state->old_bs; we need it */
194
bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);
195
bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
196
bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */
197
+
198
+ aio_context_release(aio_context);
199
}
200
}
201
}
202
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_clean(BlkActionState *common)
203
{
204
ExternalSnapshotState *state =
205
DO_UPCAST(ExternalSnapshotState, common, common);
206
- if (state->aio_context) {
207
- bdrv_drained_end(state->old_bs);
208
- bdrv_unref(state->new_bs);
209
- aio_context_release(state->aio_context);
210
+ AioContext *aio_context;
211
+
212
+ if (!state->old_bs) {
213
+ return;
214
}
215
+
216
+ aio_context = bdrv_get_aio_context(state->old_bs);
217
+ aio_context_acquire(aio_context);
218
+
219
+ bdrv_drained_end(state->old_bs);
220
+ bdrv_unref(state->new_bs);
221
+
222
+ aio_context_release(aio_context);
223
}
224
225
typedef struct DriveBackupState {
93
--
226
--
94
2.17.1
227
2.14.3
95
228
96
229
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
Add support for taking and passing forward job creation flags.
4
5
Signed-off-by: John Snow <jsnow@redhat.com>
6
Reviewed-by: Max Reitz <mreitz@redhat.com>
7
Reviewed-by: Jeff Cody <jcody@redhat.com>
8
Message-id: 20180906130225.5118-3-jsnow@redhat.com
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
include/block/block_int.h | 5 ++++-
12
block/mirror.c | 5 +++--
13
blockdev.c | 3 ++-
14
3 files changed, 9 insertions(+), 4 deletions(-)
15
16
diff --git a/include/block/block_int.h b/include/block/block_int.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/block/block_int.h
19
+++ b/include/block/block_int.h
20
@@ -XXX,XX +XXX,XX @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
21
* @target: Block device to write to.
22
* @replaces: Block graph node name to replace once the mirror is done. Can
23
* only be used when full mirroring is selected.
24
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
25
+ * See @BlockJobCreateFlags
26
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
27
* @granularity: The chosen granularity for the dirty bitmap.
28
* @buf_size: The amount of data that can be in flight at one time.
29
@@ -XXX,XX +XXX,XX @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
30
*/
31
void mirror_start(const char *job_id, BlockDriverState *bs,
32
BlockDriverState *target, const char *replaces,
33
- int64_t speed, uint32_t granularity, int64_t buf_size,
34
+ int creation_flags, int64_t speed,
35
+ uint32_t granularity, int64_t buf_size,
36
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
37
BlockdevOnError on_source_error,
38
BlockdevOnError on_target_error,
39
diff --git a/block/mirror.c b/block/mirror.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/block/mirror.c
42
+++ b/block/mirror.c
43
@@ -XXX,XX +XXX,XX @@ fail:
44
45
void mirror_start(const char *job_id, BlockDriverState *bs,
46
BlockDriverState *target, const char *replaces,
47
- int64_t speed, uint32_t granularity, int64_t buf_size,
48
+ int creation_flags, int64_t speed,
49
+ uint32_t granularity, int64_t buf_size,
50
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
51
BlockdevOnError on_source_error,
52
BlockdevOnError on_target_error,
53
@@ -XXX,XX +XXX,XX @@ void mirror_start(const char *job_id, BlockDriverState *bs,
54
}
55
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
56
base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
57
- mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
58
+ mirror_start_job(job_id, bs, creation_flags, target, replaces,
59
speed, granularity, buf_size, backing_mode,
60
on_source_error, on_target_error, unmap, NULL, NULL,
61
&mirror_job_driver, is_none_mode, base, false,
62
diff --git a/blockdev.c b/blockdev.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/blockdev.c
65
+++ b/blockdev.c
66
@@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
67
bool has_copy_mode, MirrorCopyMode copy_mode,
68
Error **errp)
69
{
70
+ int job_flags = JOB_DEFAULT;
71
72
if (!has_speed) {
73
speed = 0;
74
@@ -XXX,XX +XXX,XX @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs,
75
* and will allow to check whether the node still exist at mirror completion
76
*/
77
mirror_start(job_id, bs, target,
78
- has_replaces ? replaces : NULL,
79
+ has_replaces ? replaces : NULL, job_flags,
80
speed, granularity, buf_size, sync, backing_mode,
81
on_source_error, on_target_error, unmap, filter_node_name,
82
copy_mode, errp);
83
--
84
2.17.1
85
86
1
From: Kevin Wolf <kwolf@redhat.com>
1
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
3
Reviewed-by: Eric Blake <eblake@redhat.com>
4
Message-id: 20171206144550.22295-4-stefanha@redhat.com
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
---
7
blockdev.c | 42 ++++++++++++++++++++++++++++++++++--------
8
1 file changed, 34 insertions(+), 8 deletions(-)
2
9
3
Block jobs claim in .drained_poll() that they are in a quiescent state
10
diff --git a/blockdev.c b/blockdev.c
4
as soon as job->deferred_to_main_loop is true. This is obviously wrong,
5
they still have a completion BH to run. We only get away with this
6
because commit 91af091f923 added an unconditional aio_poll(false) to the
7
drain functions, but this is bypassing the regular drain mechanisms.
8
9
However, just removing this and reporting the job as still active
10
doesn't work either: The completion callbacks themselves call drain
11
functions (directly, or indirectly with bdrv_reopen), so they would
12
deadlock then.
13
14
As a better lie, claim that the job is active as long as the BH is
15
pending, but falsely call it quiescent from the point in the BH when the
16
completion callback is called. At this point, nested drain calls won't
17
deadlock because they ignore the job, and outer drains will wait for the
18
job to really reach a quiescent state because the callback is already
19
running.
20
21
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
22
Reviewed-by: Max Reitz <mreitz@redhat.com>
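Condensed into one place, the hand-off looks as follows (the fragments are
labelled with the functions the hunks below actually touch; this is an
illustration, not a verbatim copy):

    /* child_job_drained_poll(): deferred_to_main_loop alone no longer
     * counts as quiescent; a pending completion BH keeps the job busy. */
    if (!job->busy || job_is_completed(job)) {
        return false;    /* quiescent */
    }

    /* job_co_entry(), after run() returns: stay busy while the completion
     * BH is pending. */
    job->deferred_to_main_loop = true;
    job->busy = true;
    aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);

    /* job_exit(): the completion callbacks may drain, so pretend to be
     * idle from here on to avoid deadlocking nested drains. */
    job->busy = false;
    job_event_idle(job);
    job_completed(job);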
23
---
24
include/qemu/job.h | 3 +++
25
blockjob.c | 2 +-
26
job.c | 11 ++++++++++-
27
3 files changed, 14 insertions(+), 2 deletions(-)
28
29
diff --git a/include/qemu/job.h b/include/qemu/job.h
30
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
31
--- a/include/qemu/job.h
12
--- a/blockdev.c
32
+++ b/include/qemu/job.h
13
+++ b/blockdev.c
33
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
14
@@ -XXX,XX +XXX,XX @@ static void external_snapshot_clean(BlkActionState *common)
34
* Set to false by the job while the coroutine has yielded and may be
15
typedef struct DriveBackupState {
35
* re-entered by job_enter(). There may still be I/O or event loop activity
16
BlkActionState common;
36
* pending. Accessed under block_job_mutex (in blockjob.c).
17
BlockDriverState *bs;
37
+ *
18
- AioContext *aio_context;
38
+ * When the job is deferred to the main loop, busy is true as long as the
19
BlockJob *job;
39
+ * bottom half is still pending.
20
} DriveBackupState;
40
*/
21
41
bool busy;
22
@@ -XXX,XX +XXX,XX @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
42
23
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
43
diff --git a/blockjob.c b/blockjob.c
24
BlockDriverState *bs;
44
index XXXXXXX..XXXXXXX 100644
25
DriveBackup *backup;
45
--- a/blockjob.c
26
+ AioContext *aio_context;
46
+++ b/blockjob.c
27
Error *local_err = NULL;
47
@@ -XXX,XX +XXX,XX @@ static bool child_job_drained_poll(BdrvChild *c)
28
48
/* An inactive or completed job doesn't have any pending requests. Jobs
29
assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
49
* with !job->busy are either already paused or have a pause point after
30
@@ -XXX,XX +XXX,XX @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
50
* being reentered, so no job driver code will run before they pause. */
31
return;
51
- if (!job->busy || job_is_completed(job) || job->deferred_to_main_loop) {
52
+ if (!job->busy || job_is_completed(job)) {
53
return false;
54
}
32
}
55
33
56
diff --git a/job.c b/job.c
34
- /* AioContext is released in .clean() */
57
index XXXXXXX..XXXXXXX 100644
35
- state->aio_context = bdrv_get_aio_context(bs);
58
--- a/job.c
36
- aio_context_acquire(state->aio_context);
59
+++ b/job.c
37
+ aio_context = bdrv_get_aio_context(bs);
60
@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
38
+ aio_context_acquire(aio_context);
61
AioContext *ctx = job->aio_context;
62
63
aio_context_acquire(ctx);
64
+
39
+
65
+ /* This is a lie, we're not quiescent, but still doing the completion
40
+ /* Paired with .clean() */
66
+ * callbacks. However, completion callbacks tend to involve operations that
41
bdrv_drained_begin(bs);
67
+ * drain block nodes, and if .drained_poll still returned true, we would
68
+ * deadlock. */
69
+ job->busy = false;
70
+ job_event_idle(job);
71
+
42
+
72
job_completed(job);
43
state->bs = bs;
44
45
state->job = do_drive_backup(backup, common->block_job_txn, &local_err);
46
if (local_err) {
47
error_propagate(errp, local_err);
48
- return;
49
+ goto out;
50
}
73
+
51
+
74
aio_context_release(ctx);
52
+out:
53
+ aio_context_release(aio_context);
75
}
54
}
76
55
77
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
56
static void drive_backup_commit(BlkActionState *common)
78
assert(job && job->driver && job->driver->run);
57
{
79
job_pause_point(job);
58
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
80
job->ret = job->driver->run(job, &job->err);
59
+ AioContext *aio_context;
81
- job_event_idle(job);
60
+
82
job->deferred_to_main_loop = true;
61
+ aio_context = bdrv_get_aio_context(state->bs);
83
+ job->busy = true;
62
+ aio_context_acquire(aio_context);
84
aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
63
+
64
assert(state->job);
65
block_job_start(state->job);
66
+
67
+ aio_context_release(aio_context);
85
}
68
}
86
69
70
static void drive_backup_abort(BlkActionState *common)
71
@@ -XXX,XX +XXX,XX @@ static void drive_backup_abort(BlkActionState *common)
72
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
73
74
if (state->job) {
75
+ AioContext *aio_context;
76
+
77
+ aio_context = bdrv_get_aio_context(state->bs);
78
+ aio_context_acquire(aio_context);
79
+
80
block_job_cancel_sync(state->job);
81
+
82
+ aio_context_release(aio_context);
83
}
84
}
85
86
static void drive_backup_clean(BlkActionState *common)
87
{
88
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
89
+ AioContext *aio_context;
90
91
- if (state->aio_context) {
92
- bdrv_drained_end(state->bs);
93
- aio_context_release(state->aio_context);
94
+ if (!state->bs) {
95
+ return;
96
}
97
+
98
+ aio_context = bdrv_get_aio_context(state->bs);
99
+ aio_context_acquire(aio_context);
100
+
101
+ bdrv_drained_end(state->bs);
102
+
103
+ aio_context_release(aio_context);
104
}
105
106
typedef struct BlockdevBackupState {
87
--
107
--
88
2.17.1
108
2.14.3
89
109
90
110
1
From: Kevin Wolf <kwolf@redhat.com>
1
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
3
Reviewed-by: Eric Blake <eblake@redhat.com>
4
Message-id: 20171206144550.22295-5-stefanha@redhat.com
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
---
7
blockdev.c | 44 ++++++++++++++++++++++++++++++++++----------
8
1 file changed, 34 insertions(+), 10 deletions(-)
2
9
3
When draining a block node, we recurse to its parent and for subtree
10
diff --git a/blockdev.c b/blockdev.c
4
drains also to its children. A single AIO_WAIT_WHILE() is then used to
5
wait for bdrv_drain_poll() to become true, which depends on all of the
6
nodes we recursed to. However, if the respective child or parent becomes
7
quiescent and calls bdrv_wakeup(), only the AioWait of the child/parent
8
is checked, while AIO_WAIT_WHILE() depends on the AioWait of the
9
original node.
10
11
Fix this by using a single AioWait for all callers of AIO_WAIT_WHILE().
12
13
This may mean that the draining thread gets a few more unnecessary
14
wakeups because an unrelated operation got completed, but we already
15
wake it up when something _could_ have changed rather than only if it
16
has certainly changed.
17
18
Apart from that, drain is a slow path anyway. In theory it would be
19
possible to use wakeups more selectively and still correctly, but the
20
gains are likely not worth the additional complexity. In fact, this
21
patch is a nice simplification for some places in the code.
22
23
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
24
Reviewed-by: Eric Blake <eblake@redhat.com>
25
Reviewed-by: Max Reitz <mreitz@redhat.com>
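In terms of the API, a caller now looks like this (MyWork and
schedule_my_work_in_iothread() are hypothetical helpers, as in the header
comment below); the AioWait argument disappears on both sides:

    typedef struct {
        bool done;
    } MyWork;

    /* Main loop: wait for work scheduled in an IOThread to finish. */
    void wait_for_work(AioContext *ctx)
    {
        MyWork work = { .done = false };

        schedule_my_work_in_iothread(ctx, &work);
        AIO_WAIT_WHILE(ctx, !work.done);
    }

    /* IOThread: flag completion and kick the single global waiter. */
    void complete_work(MyWork *work)
    {
        work->done = true;
        aio_wait_kick();
    }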
26
---
27
include/block/aio-wait.h | 22 +++++++++++-----------
28
include/block/block.h | 6 +-----
29
include/block/block_int.h | 3 ---
30
include/block/blockjob.h | 10 ----------
31
block.c | 5 -----
32
block/block-backend.c | 11 ++++-------
33
block/io.c | 7 ++-----
34
blockjob.c | 13 +------------
35
job.c | 3 +--
36
util/aio-wait.c | 11 ++++++-----
37
10 files changed, 26 insertions(+), 65 deletions(-)
38
39
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
40
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
41
--- a/include/block/aio-wait.h
12
--- a/blockdev.c
42
+++ b/include/block/aio-wait.h
13
+++ b/blockdev.c
43
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ typedef struct BlockdevBackupState {
44
/**
15
BlkActionState common;
45
* AioWait:
16
BlockDriverState *bs;
46
*
17
BlockJob *job;
47
- * An object that facilitates synchronous waiting on a condition. The main
18
- AioContext *aio_context;
48
- * loop can wait on an operation running in an IOThread as follows:
19
} BlockdevBackupState;
49
+ * An object that facilitates synchronous waiting on a condition. A single
20
50
+ * global AioWait object (global_aio_wait) is used internally.
21
static BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
51
+ *
22
@@ -XXX,XX +XXX,XX @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
52
+ * The main loop can wait on an operation running in an IOThread as follows:
23
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
53
*
24
BlockdevBackup *backup;
54
- * AioWait *wait = ...;
25
BlockDriverState *bs, *target;
55
* AioContext *ctx = ...;
26
+ AioContext *aio_context;
56
* MyWork work = { .done = false };
27
Error *local_err = NULL;
57
* schedule_my_work_in_iothread(ctx, &work);
28
58
- * AIO_WAIT_WHILE(wait, ctx, !work.done);
29
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
59
+ * AIO_WAIT_WHILE(ctx, !work.done);
30
@@ -XXX,XX +XXX,XX @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
60
*
31
return;
61
* The IOThread must call aio_wait_kick() to notify the main loop when
32
}
62
* work.done changes:
33
63
@@ -XXX,XX +XXX,XX @@
34
- /* AioContext is released in .clean() */
64
* {
35
- state->aio_context = bdrv_get_aio_context(bs);
65
* ...
36
- if (state->aio_context != bdrv_get_aio_context(target)) {
66
* work.done = true;
37
- state->aio_context = NULL;
67
- * aio_wait_kick(wait);
38
+ aio_context = bdrv_get_aio_context(bs);
68
+ * aio_wait_kick();
39
+ if (aio_context != bdrv_get_aio_context(target)) {
69
* }
40
error_setg(errp, "Backup between two IO threads is not implemented");
70
*/
41
return;
71
typedef struct {
42
}
72
@@ -XXX,XX +XXX,XX @@ typedef struct {
43
- aio_context_acquire(state->aio_context);
73
unsigned num_waiters;
44
+ aio_context_acquire(aio_context);
74
} AioWait;
45
state->bs = bs;
75
76
+extern AioWait global_aio_wait;
77
+
46
+
78
/**
47
+ /* Paired with .clean() */
79
* AIO_WAIT_WHILE:
48
bdrv_drained_begin(state->bs);
80
- * @wait: the aio wait object
49
81
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
50
state->job = do_blockdev_backup(backup, common->block_job_txn, &local_err);
82
* caller does not hold a lock) are involved in the polling condition.
51
if (local_err) {
83
* @cond: wait while this conditional expression is true
52
error_propagate(errp, local_err);
84
@@ -XXX,XX +XXX,XX @@ typedef struct {
53
- return;
85
* wait on conditions between two IOThreads since that could lead to deadlock,
54
+ goto out;
86
* go via the main loop instead.
55
}
87
*/
56
+
88
-#define AIO_WAIT_WHILE(wait, ctx, cond) ({ \
57
+out:
89
+#define AIO_WAIT_WHILE(ctx, cond) ({ \
58
+ aio_context_release(aio_context);
90
bool waited_ = false; \
91
- AioWait *wait_ = (wait); \
92
+ AioWait *wait_ = &global_aio_wait; \
93
AioContext *ctx_ = (ctx); \
94
/* Increment wait_->num_waiters before evaluating cond. */ \
95
atomic_inc(&wait_->num_waiters); \
96
@@ -XXX,XX +XXX,XX @@ typedef struct {
97
98
/**
99
* aio_wait_kick:
100
- * @wait: the aio wait object that should re-evaluate its condition
101
- *
102
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
103
* synchronous operations performed in an IOThread, the main thread lets the
104
* IOThread's event loop run, waiting for the operation to complete. A
105
* aio_wait_kick() call will wake up the main thread.
106
*/
107
-void aio_wait_kick(AioWait *wait);
108
+void aio_wait_kick(void);
109
110
/**
111
* aio_wait_bh_oneshot:
112
diff --git a/include/block/block.h b/include/block/block.h
113
index XXXXXXX..XXXXXXX 100644
114
--- a/include/block/block.h
115
+++ b/include/block/block.h
116
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void);
117
void bdrv_drain_all_end(void);
118
void bdrv_drain_all(void);
119
120
-/* Returns NULL when bs == NULL */
121
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs);
122
-
123
#define BDRV_POLL_WHILE(bs, cond) ({ \
124
BlockDriverState *bs_ = (bs); \
125
- AIO_WAIT_WHILE(bdrv_get_aio_wait(bs_), \
126
- bdrv_get_aio_context(bs_), \
127
+ AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
128
cond); })
129
130
int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
131
diff --git a/include/block/block_int.h b/include/block/block_int.h
132
index XXXXXXX..XXXXXXX 100644
133
--- a/include/block/block_int.h
134
+++ b/include/block/block_int.h
135
@@ -XXX,XX +XXX,XX @@ struct BlockDriverState {
136
unsigned int in_flight;
137
unsigned int serialising_in_flight;
138
139
- /* Kicked to signal main loop when a request completes. */
140
- AioWait wait;
141
-
142
/* counter for nested bdrv_io_plug.
143
* Accessed with atomic ops.
144
*/
145
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
146
index XXXXXXX..XXXXXXX 100644
147
--- a/include/block/blockjob.h
148
+++ b/include/block/blockjob.h
149
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
150
*/
151
void block_job_remove_all_bdrv(BlockJob *job);
152
153
-/**
154
- * block_job_wakeup_all_bdrv:
155
- * @job: The block job
156
- *
157
- * Calls bdrv_wakeup() for all BlockDriverStates that have been added to the
158
- * job. This function is to be called whenever child_job_drained_poll() would
159
- * go from true to false to notify waiting drain requests.
160
- */
161
-void block_job_wakeup_all_bdrv(BlockJob *job);
162
-
163
/**
164
* block_job_set_speed:
165
* @job: The job to set the speed for.
166
diff --git a/block.c b/block.c
167
index XXXXXXX..XXXXXXX 100644
168
--- a/block.c
169
+++ b/block.c
170
@@ -XXX,XX +XXX,XX @@ AioContext *bdrv_get_aio_context(BlockDriverState *bs)
171
return bs ? bs->aio_context : qemu_get_aio_context();
172
}
59
}
173
60
174
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs)
61
static void blockdev_backup_commit(BlkActionState *common)
175
-{
176
- return bs ? &bs->wait : NULL;
177
-}
178
-
179
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co)
180
{
62
{
181
aio_co_enter(bdrv_get_aio_context(bs), co);
63
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
182
diff --git a/block/block-backend.c b/block/block-backend.c
64
+ AioContext *aio_context;
183
index XXXXXXX..XXXXXXX 100644
65
+
184
--- a/block/block-backend.c
66
+ aio_context = bdrv_get_aio_context(state->bs);
185
+++ b/block/block-backend.c
67
+ aio_context_acquire(aio_context);
186
@@ -XXX,XX +XXX,XX @@ struct BlockBackend {
68
+
187
* Accessed with atomic ops.
69
assert(state->job);
188
*/
70
block_job_start(state->job);
189
unsigned int in_flight;
71
+
190
- AioWait wait;
72
+ aio_context_release(aio_context);
191
};
192
193
typedef struct BlockBackendAIOCB {
194
@@ -XXX,XX +XXX,XX @@ static void blk_inc_in_flight(BlockBackend *blk)
195
static void blk_dec_in_flight(BlockBackend *blk)
196
{
197
atomic_dec(&blk->in_flight);
198
- aio_wait_kick(&blk->wait);
199
+ aio_wait_kick();
200
}
73
}
201
74
202
static void error_callback_bh(void *opaque)
75
static void blockdev_backup_abort(BlkActionState *common)
203
@@ -XXX,XX +XXX,XX @@ void blk_drain(BlockBackend *blk)
76
@@ -XXX,XX +XXX,XX @@ static void blockdev_backup_abort(BlkActionState *common)
204
}
77
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
205
78
206
/* We may have -ENOMEDIUM completions in flight */
79
if (state->job) {
207
- AIO_WAIT_WHILE(&blk->wait,
80
+ AioContext *aio_context;
208
- blk_get_aio_context(blk),
209
- atomic_mb_read(&blk->in_flight) > 0);
210
+ AIO_WAIT_WHILE(blk_get_aio_context(blk),
211
+ atomic_mb_read(&blk->in_flight) > 0);
212
213
if (bs) {
214
bdrv_drained_end(bs);
215
@@ -XXX,XX +XXX,XX @@ void blk_drain_all(void)
216
aio_context_acquire(ctx);
217
218
/* We may have -ENOMEDIUM completions in flight */
219
- AIO_WAIT_WHILE(&blk->wait, ctx,
220
- atomic_mb_read(&blk->in_flight) > 0);
221
+ AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
222
223
aio_context_release(ctx);
224
}
225
diff --git a/block/io.c b/block/io.c
226
index XXXXXXX..XXXXXXX 100644
227
--- a/block/io.c
228
+++ b/block/io.c
229
@@ -XXX,XX +XXX,XX @@
230
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
231
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
232
233
-static AioWait drain_all_aio_wait;
234
-
235
static void bdrv_parent_cb_resize(BlockDriverState *bs);
236
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
237
int64_t offset, int bytes, BdrvRequestFlags flags);
238
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
239
}
240
241
/* Now poll the in-flight requests */
242
- AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
243
+ AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
244
245
while ((bs = bdrv_next_all_states(bs))) {
246
bdrv_drain_assert_idle(bs);
247
@@ -XXX,XX +XXX,XX @@ void bdrv_inc_in_flight(BlockDriverState *bs)
248
249
void bdrv_wakeup(BlockDriverState *bs)
250
{
251
- aio_wait_kick(bdrv_get_aio_wait(bs));
252
- aio_wait_kick(&drain_all_aio_wait);
253
+ aio_wait_kick();
254
}
255
256
void bdrv_dec_in_flight(BlockDriverState *bs)
257
diff --git a/blockjob.c b/blockjob.c
258
index XXXXXXX..XXXXXXX 100644
259
--- a/blockjob.c
260
+++ b/blockjob.c
261
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
262
return 0;
263
}
264
265
-void block_job_wakeup_all_bdrv(BlockJob *job)
266
-{
267
- GSList *l;
268
-
269
- for (l = job->nodes; l; l = l->next) {
270
- BdrvChild *c = l->data;
271
- bdrv_wakeup(c->bs);
272
- }
273
-}
274
-
275
static void block_job_on_idle(Notifier *n, void *opaque)
276
{
277
- BlockJob *job = opaque;
278
- block_job_wakeup_all_bdrv(job);
279
+ aio_wait_kick();
280
}
281
282
bool block_job_is_internal(BlockJob *job)
283
diff --git a/job.c b/job.c
284
index XXXXXXX..XXXXXXX 100644
285
--- a/job.c
286
+++ b/job.c
287
@@ -XXX,XX +XXX,XX @@ void job_complete(Job *job, Error **errp)
288
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
289
{
290
Error *local_err = NULL;
291
- AioWait dummy_wait = {};
292
int ret;
293
294
job_ref(job);
295
@@ -XXX,XX +XXX,XX @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
296
return -EBUSY;
297
}
298
299
- AIO_WAIT_WHILE(&dummy_wait, job->aio_context,
300
+ AIO_WAIT_WHILE(job->aio_context,
301
(job_drain(job), !job_is_completed(job)));
302
303
ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
304
diff --git a/util/aio-wait.c b/util/aio-wait.c
305
index XXXXXXX..XXXXXXX 100644
306
--- a/util/aio-wait.c
307
+++ b/util/aio-wait.c
308
@@ -XXX,XX +XXX,XX @@
309
#include "qemu/main-loop.h"
310
#include "block/aio-wait.h"
311
312
+AioWait global_aio_wait;
313
+
81
+
314
static void dummy_bh_cb(void *opaque)
82
+ aio_context = bdrv_get_aio_context(state->bs);
315
{
83
+ aio_context_acquire(aio_context);
316
/* The point is to make AIO_WAIT_WHILE()'s aio_poll() return */
84
+
317
}
85
block_job_cancel_sync(state->job);
318
86
+
319
-void aio_wait_kick(AioWait *wait)
87
+ aio_context_release(aio_context);
320
+void aio_wait_kick(void)
321
{
322
/* The barrier (or an atomic op) is in the caller. */
323
- if (atomic_read(&wait->num_waiters)) {
324
+ if (atomic_read(&global_aio_wait.num_waiters)) {
325
aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
326
}
88
}
327
}
89
}
328
90
329
typedef struct {
91
static void blockdev_backup_clean(BlkActionState *common)
330
- AioWait wait;
92
{
331
bool done;
93
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
332
QEMUBHFunc *cb;
94
+ AioContext *aio_context;
333
void *opaque;
95
334
@@ -XXX,XX +XXX,XX @@ static void aio_wait_bh(void *opaque)
96
- if (state->aio_context) {
335
data->cb(data->opaque);
97
- bdrv_drained_end(state->bs);
336
98
- aio_context_release(state->aio_context);
337
data->done = true;
99
+ if (!state->bs) {
338
- aio_wait_kick(&data->wait);
100
+ return;
339
+ aio_wait_kick();
101
}
102
+
103
+ aio_context = bdrv_get_aio_context(state->bs);
104
+ aio_context_acquire(aio_context);
105
+
106
+ bdrv_drained_end(state->bs);
107
+
108
+ aio_context_release(aio_context);
340
}
109
}
341
110
342
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
111
typedef struct BlockDirtyBitmapState {
343
@@ -XXX,XX +XXX,XX @@ void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
344
assert(qemu_get_current_aio_context() == qemu_get_aio_context());
345
346
aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);
347
- AIO_WAIT_WHILE(&data.wait, ctx, !data.done);
348
+ AIO_WAIT_WHILE(ctx, !data.done);
349
}
350
--
112
--
351
2.17.1
113
2.14.3
352
114
353
115
1
From: John Snow <jsnow@redhat.com>
1
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
2
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
3
Reviewed-by: Eric Blake <eblake@redhat.com>
4
Message-id: 20171206144550.22295-6-stefanha@redhat.com
5
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
6
---
7
blockdev.c | 47 +++++++++++++++++++++++++++++++----------------
8
1 file changed, 31 insertions(+), 16 deletions(-)
2
9
3
Add support for taking and passing forward job creation flags.
4
5
Signed-off-by: John Snow <jsnow@redhat.com>
6
Reviewed-by: Max Reitz <mreitz@redhat.com>
7
Reviewed-by: Jeff Cody <jcody@redhat.com>
8
Message-id: 20180906130225.5118-4-jsnow@redhat.com
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
include/block/block_int.h | 5 ++++-
12
block/stream.c | 5 +++--
13
blockdev.c | 3 ++-
14
3 files changed, 9 insertions(+), 4 deletions(-)
15
16
diff --git a/include/block/block_int.h b/include/block/block_int.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/block/block_int.h
19
+++ b/include/block/block_int.h
20
@@ -XXX,XX +XXX,XX @@ int is_windows_drive(const char *filename);
21
* flatten the whole backing file chain onto @bs.
22
* @backing_file_str: The file name that will be written to @bs as the
23
* the new backing file if the job completes. Ignored if @base is %NULL.
24
+ * @creation_flags: Flags that control the behavior of the Job lifetime.
25
+ * See @BlockJobCreateFlags
26
* @speed: The maximum speed, in bytes per second, or 0 for unlimited.
27
* @on_error: The action to take upon error.
28
* @errp: Error object.
29
@@ -XXX,XX +XXX,XX @@ int is_windows_drive(const char *filename);
30
*/
31
void stream_start(const char *job_id, BlockDriverState *bs,
32
BlockDriverState *base, const char *backing_file_str,
33
- int64_t speed, BlockdevOnError on_error, Error **errp);
34
+ int creation_flags, int64_t speed,
35
+ BlockdevOnError on_error, Error **errp);
36
37
/**
38
* commit_start:
39
diff --git a/block/stream.c b/block/stream.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/block/stream.c
42
+++ b/block/stream.c
43
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
44
45
void stream_start(const char *job_id, BlockDriverState *bs,
46
BlockDriverState *base, const char *backing_file_str,
47
- int64_t speed, BlockdevOnError on_error, Error **errp)
48
+ int creation_flags, int64_t speed,
49
+ BlockdevOnError on_error, Error **errp)
50
{
51
StreamBlockJob *s;
52
BlockDriverState *iter;
53
@@ -XXX,XX +XXX,XX @@ void stream_start(const char *job_id, BlockDriverState *bs,
54
BLK_PERM_GRAPH_MOD,
55
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
56
BLK_PERM_WRITE,
57
- speed, JOB_DEFAULT, NULL, NULL, errp);
58
+ speed, creation_flags, NULL, NULL, errp);
59
if (!s) {
60
goto fail;
61
}
62
diff --git a/blockdev.c b/blockdev.c
10
diff --git a/blockdev.c b/blockdev.c
63
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
64
--- a/blockdev.c
12
--- a/blockdev.c
65
+++ b/blockdev.c
13
+++ b/blockdev.c
66
@@ -XXX,XX +XXX,XX @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
14
@@ -XXX,XX +XXX,XX @@ struct BlkActionState {
67
AioContext *aio_context;
15
typedef struct InternalSnapshotState {
68
Error *local_err = NULL;
16
BlkActionState common;
69
const char *base_name = NULL;
17
BlockDriverState *bs;
70
+ int job_flags = JOB_DEFAULT;
18
- AioContext *aio_context;
71
19
QEMUSnapshotInfo sn;
72
if (!has_on_error) {
20
bool created;
73
on_error = BLOCKDEV_ON_ERROR_REPORT;
21
} InternalSnapshotState;
74
@@ -XXX,XX +XXX,XX @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
22
@@ -XXX,XX +XXX,XX @@ static void internal_snapshot_prepare(BlkActionState *common,
75
base_name = has_backing_file ? backing_file : base_name;
23
qemu_timeval tv;
76
24
BlockdevSnapshotInternal *internal;
77
stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name,
25
InternalSnapshotState *state;
78
- has_speed ? speed : 0, on_error, &local_err);
26
+ AioContext *aio_context;
79
+ job_flags, has_speed ? speed : 0, on_error, &local_err);
27
int ret1;
28
29
g_assert(common->action->type ==
30
@@ -XXX,XX +XXX,XX @@ static void internal_snapshot_prepare(BlkActionState *common,
31
return;
32
}
33
34
- /* AioContext is released in .clean() */
35
- state->aio_context = bdrv_get_aio_context(bs);
36
- aio_context_acquire(state->aio_context);
37
+ aio_context = bdrv_get_aio_context(bs);
38
+ aio_context_acquire(aio_context);
39
40
state->bs = bs;
41
+
42
+ /* Paired with .clean() */
43
bdrv_drained_begin(bs);
44
45
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
46
- return;
47
+ goto out;
48
}
49
50
if (bdrv_is_read_only(bs)) {
51
error_setg(errp, "Device '%s' is read only", device);
52
- return;
53
+ goto out;
54
}
55
56
if (!bdrv_can_snapshot(bs)) {
57
error_setg(errp, "Block format '%s' used by device '%s' "
58
"does not support internal snapshots",
59
bs->drv->format_name, device);
60
- return;
61
+ goto out;
62
}
63
64
if (!strlen(name)) {
65
error_setg(errp, "Name is empty");
66
- return;
67
+ goto out;
68
}
69
70
/* check whether a snapshot with name exist */
71
@@ -XXX,XX +XXX,XX @@ static void internal_snapshot_prepare(BlkActionState *common,
72
&local_err);
80
if (local_err) {
73
if (local_err) {
81
error_propagate(errp, local_err);
74
error_propagate(errp, local_err);
82
goto out;
75
- return;
76
+ goto out;
77
} else if (ret) {
78
error_setg(errp,
79
"Snapshot with name '%s' already exists on device '%s'",
80
name, device);
81
- return;
82
+ goto out;
83
}
84
85
/* 3. take the snapshot */
86
@@ -XXX,XX +XXX,XX @@ static void internal_snapshot_prepare(BlkActionState *common,
87
error_setg_errno(errp, -ret1,
88
"Failed to create snapshot '%s' on device '%s'",
89
name, device);
90
- return;
91
+ goto out;
92
}
93
94
/* 4. succeed, mark a snapshot is created */
95
state->created = true;
96
+
97
+out:
98
+ aio_context_release(aio_context);
99
}
100
101
static void internal_snapshot_abort(BlkActionState *common)
102
@@ -XXX,XX +XXX,XX @@ static void internal_snapshot_abort(BlkActionState *common)
103
DO_UPCAST(InternalSnapshotState, common, common);
104
BlockDriverState *bs = state->bs;
105
QEMUSnapshotInfo *sn = &state->sn;
106
+ AioContext *aio_context;
107
Error *local_error = NULL;
108
109
if (!state->created) {
110
return;
111
}
112
113
+ aio_context = bdrv_get_aio_context(state->bs);
114
+ aio_context_acquire(aio_context);
115
+
116
if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
117
error_reportf_err(local_error,
118
"Failed to delete snapshot with id '%s' and "
119
@@ -XXX,XX +XXX,XX @@ static void internal_snapshot_abort(BlkActionState *common)
120
sn->id_str, sn->name,
121
bdrv_get_device_name(bs));
122
}
123
+
124
+ aio_context_release(aio_context);
125
}
126
127
static void internal_snapshot_clean(BlkActionState *common)
128
{
129
InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
130
common, common);
131
+ AioContext *aio_context;
132
133
- if (state->aio_context) {
134
- if (state->bs) {
135
- bdrv_drained_end(state->bs);
136
- }
137
- aio_context_release(state->aio_context);
138
+ if (!state->bs) {
139
+ return;
140
}
141
+
142
+ aio_context = bdrv_get_aio_context(state->bs);
143
+ aio_context_acquire(aio_context);
144
+
145
+ bdrv_drained_end(state->bs);
146
+
147
+ aio_context_release(aio_context);
148
}
149
150
/* external snapshot private data */
83
--
151
--
84
2.17.1
152
2.14.3
85
153
86
154
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
In cases where we abort the block/mirror job, there's no point in
4
installing the new backing chain before we finish aborting.
5
6
Signed-off-by: John Snow <jsnow@redhat.com>
7
Message-id: 20180906130225.5118-6-jsnow@redhat.com
8
Reviewed-by: Jeff Cody <jcody@redhat.com>
9
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/mirror.c | 2 +-
13
1 file changed, 1 insertion(+), 1 deletion(-)
14
15
diff --git a/block/mirror.c b/block/mirror.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/mirror.c
18
+++ b/block/mirror.c
19
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job)
20
* required before it could become a backing file of target_bs. */
21
bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
22
&error_abort);
23
- if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
24
+ if (ret == 0 && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
25
BlockDriverState *backing = s->is_none_mode ? src : s->base;
26
if (backing_bs(target_bs) != backing) {
27
bdrv_set_backing_hd(target_bs, backing, &local_err);
28
--
29
2.17.1
30
31
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
For purposes of minimum code movement, refactor the mirror_exit
4
callback to use the post-finalization callbacks in a trivial way.
5
6
Signed-off-by: John Snow <jsnow@redhat.com>
7
Message-id: 20180906130225.5118-7-jsnow@redhat.com
8
Reviewed-by: Jeff Cody <jcody@redhat.com>
9
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
[mreitz: Added comment for the mirror_exit() function]
11
Signed-off-by: Max Reitz <mreitz@redhat.com>
12
---
13
block/mirror.c | 44 +++++++++++++++++++++++++++++++++-----------
14
1 file changed, 33 insertions(+), 11 deletions(-)
15
16
diff --git a/block/mirror.c b/block/mirror.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/block/mirror.c
19
+++ b/block/mirror.c
20
@@ -XXX,XX +XXX,XX @@ typedef struct MirrorBlockJob {
21
int max_iov;
22
bool initial_zeroing_ongoing;
23
int in_active_write_counter;
24
+ bool prepared;
25
} MirrorBlockJob;
26
27
typedef struct MirrorBDSOpaque {
28
@@ -XXX,XX +XXX,XX @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
29
}
30
}
31
32
-static void mirror_exit(Job *job)
33
+/**
34
+ * mirror_exit_common: handle both abort() and prepare() cases.
35
+ * for .prepare, returns 0 on success and -errno on failure.
36
+ * for .abort cases, denoted by abort = true, MUST return 0.
37
+ */
38
+static int mirror_exit_common(Job *job)
39
{
40
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
41
BlockJob *bjob = &s->common;
42
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job)
43
BlockDriverState *target_bs = blk_bs(s->target);
44
BlockDriverState *mirror_top_bs = s->mirror_top_bs;
45
Error *local_err = NULL;
46
- int ret = job->ret;
47
+ bool abort = job->ret < 0;
48
+ int ret = 0;
49
+
50
+ if (s->prepared) {
51
+ return 0;
52
+ }
53
+ s->prepared = true;
54
55
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
56
57
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job)
58
* required before it could become a backing file of target_bs. */
59
bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
60
&error_abort);
61
- if (ret == 0 && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
62
+ if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
63
BlockDriverState *backing = s->is_none_mode ? src : s->base;
64
if (backing_bs(target_bs) != backing) {
65
bdrv_set_backing_hd(target_bs, backing, &local_err);
66
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job)
67
aio_context_acquire(replace_aio_context);
68
}
69
70
- if (s->should_complete && ret == 0) {
71
- BlockDriverState *to_replace = src;
72
- if (s->to_replace) {
73
- to_replace = s->to_replace;
74
- }
75
+ if (s->should_complete && !abort) {
76
+ BlockDriverState *to_replace = s->to_replace ?: src;
77
78
if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
79
bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
80
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job)
81
bdrv_unref(mirror_top_bs);
82
bdrv_unref(src);
83
84
- job->ret = ret;
85
+ return ret;
86
+}
87
+
88
+static int mirror_prepare(Job *job)
89
+{
90
+ return mirror_exit_common(job);
91
+}
92
+
93
+static void mirror_abort(Job *job)
94
+{
95
+ int ret = mirror_exit_common(job);
96
+ assert(ret == 0);
97
}
98
99
static void mirror_throttle(MirrorBlockJob *s)
100
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
101
.user_resume = block_job_user_resume,
102
.drain = block_job_drain,
103
.run = mirror_run,
104
- .exit = mirror_exit,
105
+ .prepare = mirror_prepare,
106
+ .abort = mirror_abort,
107
.pause = mirror_pause,
108
.complete = mirror_complete,
109
},
110
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
111
.user_resume = block_job_user_resume,
112
.drain = block_job_drain,
113
.run = mirror_run,
114
- .exit = mirror_exit,
115
+ .prepare = mirror_prepare,
116
+ .abort = mirror_abort,
117
.pause = mirror_pause,
118
.complete = mirror_complete,
119
},
120
--
121
2.17.1
122
123
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
These tests don't actually test blockjobs anymore, they test
4
generic Job lifetimes. Change the types accordingly.
5
6
Signed-off-by: John Snow <jsnow@redhat.com>
7
Reviewed-by: Max Reitz <mreitz@redhat.com>
8
Message-id: 20180906130225.5118-9-jsnow@redhat.com
9
Reviewed-by: Jeff Cody <jcody@redhat.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
12
tests/test-blockjob.c | 98 ++++++++++++++++++++++---------------------
13
1 file changed, 50 insertions(+), 48 deletions(-)
14
15
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tests/test-blockjob.c
18
+++ b/tests/test-blockjob.c
19
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
20
},
21
};
22
23
-static CancelJob *create_common(BlockJob **pjob)
24
+static CancelJob *create_common(Job **pjob)
25
{
26
BlockBackend *blk;
27
- BlockJob *job;
28
+ Job *job;
29
+ BlockJob *bjob;
30
CancelJob *s;
31
32
blk = create_blk(NULL);
33
- job = mk_job(blk, "Steve", &test_cancel_driver, true,
34
- JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
35
- job_ref(&job->job);
36
- assert(job->job.status == JOB_STATUS_CREATED);
37
- s = container_of(job, CancelJob, common);
38
+ bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
39
+ JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
40
+ job = &bjob->job;
41
+ job_ref(job);
42
+ assert(job->status == JOB_STATUS_CREATED);
43
+ s = container_of(bjob, CancelJob, common);
44
s->blk = blk;
45
46
*pjob = job;
47
@@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s)
48
49
static void test_cancel_created(void)
50
{
51
- BlockJob *job;
52
+ Job *job;
53
CancelJob *s;
54
55
s = create_common(&job);
56
@@ -XXX,XX +XXX,XX @@ static void test_cancel_created(void)
57
58
static void test_cancel_running(void)
59
{
60
- BlockJob *job;
61
+ Job *job;
62
CancelJob *s;
63
64
s = create_common(&job);
65
66
- job_start(&job->job);
67
- assert(job->job.status == JOB_STATUS_RUNNING);
68
+ job_start(job);
69
+ assert(job->status == JOB_STATUS_RUNNING);
70
71
cancel_common(s);
72
}
73
74
static void test_cancel_paused(void)
75
{
76
- BlockJob *job;
77
+ Job *job;
78
CancelJob *s;
79
80
s = create_common(&job);
81
82
- job_start(&job->job);
83
- assert(job->job.status == JOB_STATUS_RUNNING);
84
+ job_start(job);
85
+ assert(job->status == JOB_STATUS_RUNNING);
86
87
- job_user_pause(&job->job, &error_abort);
88
- job_enter(&job->job);
89
- assert(job->job.status == JOB_STATUS_PAUSED);
90
+ job_user_pause(job, &error_abort);
91
+ job_enter(job);
92
+ assert(job->status == JOB_STATUS_PAUSED);
93
94
cancel_common(s);
95
}
96
97
static void test_cancel_ready(void)
98
{
99
- BlockJob *job;
100
+ Job *job;
101
CancelJob *s;
102
103
s = create_common(&job);
104
105
- job_start(&job->job);
106
- assert(job->job.status == JOB_STATUS_RUNNING);
107
+ job_start(job);
108
+ assert(job->status == JOB_STATUS_RUNNING);
109
110
s->should_converge = true;
111
- job_enter(&job->job);
112
- assert(job->job.status == JOB_STATUS_READY);
113
+ job_enter(job);
114
+ assert(job->status == JOB_STATUS_READY);
115
116
cancel_common(s);
117
}
118
119
static void test_cancel_standby(void)
120
{
121
- BlockJob *job;
122
+ Job *job;
123
CancelJob *s;
124
125
s = create_common(&job);
126
127
- job_start(&job->job);
128
- assert(job->job.status == JOB_STATUS_RUNNING);
129
+ job_start(job);
130
+ assert(job->status == JOB_STATUS_RUNNING);
131
132
s->should_converge = true;
133
- job_enter(&job->job);
134
- assert(job->job.status == JOB_STATUS_READY);
135
+ job_enter(job);
136
+ assert(job->status == JOB_STATUS_READY);
137
138
- job_user_pause(&job->job, &error_abort);
139
- job_enter(&job->job);
140
- assert(job->job.status == JOB_STATUS_STANDBY);
141
+ job_user_pause(job, &error_abort);
142
+ job_enter(job);
143
+ assert(job->status == JOB_STATUS_STANDBY);
144
145
cancel_common(s);
146
}
147
148
static void test_cancel_pending(void)
149
{
150
- BlockJob *job;
151
+ Job *job;
152
CancelJob *s;
153
154
s = create_common(&job);
155
156
- job_start(&job->job);
157
- assert(job->job.status == JOB_STATUS_RUNNING);
158
+ job_start(job);
159
+ assert(job->status == JOB_STATUS_RUNNING);
160
161
s->should_converge = true;
162
- job_enter(&job->job);
163
- assert(job->job.status == JOB_STATUS_READY);
164
+ job_enter(job);
165
+ assert(job->status == JOB_STATUS_READY);
166
167
- job_complete(&job->job, &error_abort);
168
- job_enter(&job->job);
169
+ job_complete(job, &error_abort);
170
+ job_enter(job);
171
while (!s->completed) {
172
aio_poll(qemu_get_aio_context(), true);
173
}
174
- assert(job->job.status == JOB_STATUS_PENDING);
175
+ assert(job->status == JOB_STATUS_PENDING);
176
177
cancel_common(s);
178
}
179
180
static void test_cancel_concluded(void)
181
{
182
- BlockJob *job;
183
+ Job *job;
184
CancelJob *s;
185
186
s = create_common(&job);
187
188
- job_start(&job->job);
189
- assert(job->job.status == JOB_STATUS_RUNNING);
190
+ job_start(job);
191
+ assert(job->status == JOB_STATUS_RUNNING);
192
193
s->should_converge = true;
194
- job_enter(&job->job);
195
- assert(job->job.status == JOB_STATUS_READY);
196
+ job_enter(job);
197
+ assert(job->status == JOB_STATUS_READY);
198
199
- job_complete(&job->job, &error_abort);
200
- job_enter(&job->job);
201
+ job_complete(job, &error_abort);
202
+ job_enter(job);
203
while (!s->completed) {
204
aio_poll(qemu_get_aio_context(), true);
205
}
206
- assert(job->job.status == JOB_STATUS_PENDING);
207
+ assert(job->status == JOB_STATUS_PENDING);
208
209
- job_finalize(&job->job, &error_abort);
210
- assert(job->job.status == JOB_STATUS_CONCLUDED);
211
+ job_finalize(job, &error_abort);
212
+ assert(job->status == JOB_STATUS_CONCLUDED);
213
214
cancel_common(s);
215
}
216
--
217
2.17.1
218
219
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
We remove the exit callback and the completed boolean along with it.
4
We can simulate it just fine by waiting for the job to defer to the
5
main loop, and then giving it one final kick to get the main loop
6
portion to run.
7
8
Signed-off-by: John Snow <jsnow@redhat.com>
9
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
Message-id: 20180906130225.5118-10-jsnow@redhat.com
11
Reviewed-by: Jeff Cody <jcody@redhat.com>
12
Signed-off-by: Max Reitz <mreitz@redhat.com>
13
---
14
tests/test-blockjob.c | 16 ++++++----------
15
1 file changed, 6 insertions(+), 10 deletions(-)
16
17
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/test-blockjob.c
20
+++ b/tests/test-blockjob.c
21
@@ -XXX,XX +XXX,XX @@ typedef struct CancelJob {
22
BlockBackend *blk;
23
bool should_converge;
24
bool should_complete;
25
- bool completed;
26
} CancelJob;
27
28
-static void cancel_job_exit(Job *job)
29
-{
30
- CancelJob *s = container_of(job, CancelJob, common.job);
31
- s->completed = true;
32
-}
33
-
34
static void cancel_job_complete(Job *job, Error **errp)
35
{
36
CancelJob *s = container_of(job, CancelJob, common.job);
37
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
38
.user_resume = block_job_user_resume,
39
.drain = block_job_drain,
40
.run = cancel_job_run,
41
- .exit = cancel_job_exit,
42
.complete = cancel_job_complete,
43
},
44
};
45
@@ -XXX,XX +XXX,XX @@ static void test_cancel_pending(void)
46
47
job_complete(job, &error_abort);
48
job_enter(job);
49
- while (!s->completed) {
50
+ while (!job->deferred_to_main_loop) {
51
aio_poll(qemu_get_aio_context(), true);
52
}
53
+ assert(job->status == JOB_STATUS_READY);
54
+ aio_poll(qemu_get_aio_context(), true);
55
assert(job->status == JOB_STATUS_PENDING);
56
57
cancel_common(s);
58
@@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void)
59
60
job_complete(job, &error_abort);
61
job_enter(job);
62
- while (!s->completed) {
63
+ while (!job->deferred_to_main_loop) {
64
aio_poll(qemu_get_aio_context(), true);
65
}
66
+ assert(job->status == JOB_STATUS_READY);
67
+ aio_poll(qemu_get_aio_context(), true);
68
assert(job->status == JOB_STATUS_PENDING);
69
70
job_finalize(job, &error_abort);
71
--
72
2.17.1
73
74
From: John Snow <jsnow@redhat.com>

Presently only the backup job really guarantees what one would consider
transactional semantics. To guard against someone helpfully adding them
in the future, document that there are shortcomings in the model that
would need to be audited at that time.

Signed-off-by: John Snow <jsnow@redhat.com>
Message-id: 20180906130225.5118-17-jsnow@redhat.com
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
blockdev.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)

The dirty bitmap actions in qmp_transaction have not used AioContext
since the dirty bitmap locking discipline was introduced in commit
2119882c7eb7e2c612b24fc0c8d86f5887d6f1c3 ("block: introduce
dirty_bitmap_mutex"). Remove the unused field.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20171206144550.22295-7-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
blockdev.c | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/blockdev.c b/blockdev.c
15
diff --git a/blockdev.c b/blockdev.c
18
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
19
--- a/blockdev.c
17
--- a/blockdev.c
20
+++ b/blockdev.c
18
+++ b/blockdev.c
19
@@ -XXX,XX +XXX,XX @@ typedef struct BlockDirtyBitmapState {
20
BlkActionState common;
21
BdrvDirtyBitmap *bitmap;
22
BlockDriverState *bs;
23
- AioContext *aio_context;
24
HBitmap *backup;
25
bool prepared;
26
} BlockDirtyBitmapState;
27
@@ -XXX,XX +XXX,XX @@ static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
28
}
29
30
bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
31
- /* AioContext is released in .clean() */
32
}
33
34
static void block_dirty_bitmap_clear_abort(BlkActionState *common)
35
@@ -XXX,XX +XXX,XX @@ static void block_dirty_bitmap_clear_commit(BlkActionState *common)
36
hbitmap_free(state->backup);
37
}
38
39
-static void block_dirty_bitmap_clear_clean(BlkActionState *common)
40
-{
41
- BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
42
- common, common);
43
-
44
- if (state->aio_context) {
45
- aio_context_release(state->aio_context);
46
- }
47
-}
48
-
49
static void abort_prepare(BlkActionState *common, Error **errp)
50
{
51
error_setg(errp, "Transaction aborted using Abort action");
21
@@ -XXX,XX +XXX,XX @@ static const BlkActionOps actions[] = {
52
@@ -XXX,XX +XXX,XX @@ static const BlkActionOps actions[] = {
22
.instance_size = sizeof(BlockDirtyBitmapState),
53
.prepare = block_dirty_bitmap_clear_prepare,
23
.prepare = block_dirty_bitmap_disable_prepare,
54
.commit = block_dirty_bitmap_clear_commit,
24
.abort = block_dirty_bitmap_disable_abort,
55
.abort = block_dirty_bitmap_clear_abort,
25
- }
56
- .clean = block_dirty_bitmap_clear_clean,
26
+ },
57
}
27
+ /* Where are transactions for MIRROR, COMMIT and STREAM?
28
+ * Although these blockjobs use transaction callbacks like the backup job,
29
+ * these jobs do not necessarily adhere to transaction semantics.
30
+ * These jobs may not fully undo all of their actions on abort, nor do they
31
+ * necessarily work in transactions with more than one job in them.
32
+ */
33
};
58
};
34
59
35
/**
36
--
60
--
37
2.17.1
61
2.14.3
38
62
39
63
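As an illustration of the transactional semantics discussed above, backup jobs
started through the 'transaction' command can already be tied together so that
they succeed or fail as a group. A hedged sketch (device names and target
filenames below are made up for illustration):

  -> { "execute": "transaction",
       "arguments": {
         "actions": [
           { "type": "drive-backup",
             "data": { "device": "drive0", "target": "backup0.img",
                       "sync": "full" } },
           { "type": "drive-backup",
             "data": { "device": "drive1", "target": "backup1.img",
                       "sync": "full" } }
         ],
         "properties": { "completion-mode": "grouped" }
       } }
  <- { "return": {} }

MIRROR, COMMIT and STREAM jobs are not started through 'transaction' today, so
the new comment documents a limitation rather than changing behaviour.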
1
From: Kevin Wolf <kwolf@redhat.com>
1
Encapsulate IOThread QOM object lookup so that callers don't need to
2
know how and where IOThread objects live.
2
3
3
Commit 89bd030533e changed the test case from using job_sleep_ns() to
4
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
using qemu_co_sleep_ns() instead. Also, block_job_sleep_ns() became
5
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
5
job_sleep_ns() in commit 5d43e86e11f.
6
Reviewed-by: Eric Blake <eblake@redhat.com>
7
Message-id: 20171206144550.22295-8-stefanha@redhat.com
8
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
10
include/sysemu/iothread.h | 1 +
11
iothread.c | 7 +++++++
12
2 files changed, 8 insertions(+)
6
13
7
In both cases, some comments in the test case were not updated. Do that
14
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
8
now.
9
10
Reported-by: Max Reitz <mreitz@redhat.com>
11
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
12
Reviewed-by: Eric Blake <eblake@redhat.com>
13
---
14
tests/test-bdrv-drain.c | 10 +++++-----
15
1 file changed, 5 insertions(+), 5 deletions(-)
16
17
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
18
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/test-bdrv-drain.c
16
--- a/include/sysemu/iothread.h
20
+++ b/tests/test-bdrv-drain.c
17
+++ b/include/sysemu/iothread.h
21
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn test_job_run(Job *job, Error **errp)
18
@@ -XXX,XX +XXX,XX @@ typedef struct {
22
19
OBJECT_CHECK(IOThread, obj, TYPE_IOTHREAD)
23
job_transition_to_ready(&s->common.job);
20
24
while (!s->should_complete) {
21
char *iothread_get_id(IOThread *iothread);
25
- /* Avoid block_job_sleep_ns() because it marks the job as !busy. We
22
+IOThread *iothread_by_id(const char *id);
26
- * want to emulate some actual activity (probably some I/O) here so
23
AioContext *iothread_get_aio_context(IOThread *iothread);
27
- * that drain has to wait for this acitivity to stop. */
24
void iothread_stop_all(void);
28
+ /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
25
GMainContext *iothread_get_g_main_context(IOThread *iothread);
29
+ * emulate some actual activity (probably some I/O) here so that drain
26
diff --git a/iothread.c b/iothread.c
30
+ * has to wait for this activity to stop. */
27
index XXXXXXX..XXXXXXX 100644
31
qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
28
--- a/iothread.c
32
job_pause_point(&s->common.job);
29
+++ b/iothread.c
33
}
30
@@ -XXX,XX +XXX,XX @@ void iothread_destroy(IOThread *iothread)
34
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
31
{
35
32
object_unparent(OBJECT(iothread));
36
g_assert_cmpint(job->job.pause_count, ==, 0);
33
}
37
g_assert_false(job->job.paused);
34
+
38
- g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
35
+/* Lookup IOThread by its id. Only finds user-created objects, not internal
39
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
36
+ * iothread_create() objects. */
40
37
+IOThread *iothread_by_id(const char *id)
41
do_drain_begin_unlocked(drain_type, src);
38
+{
42
39
+ return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
43
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
40
+}
44
45
g_assert_cmpint(job->job.pause_count, ==, 0);
46
g_assert_false(job->job.paused);
47
- g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
48
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
49
50
aio_context_acquire(ctx);
51
ret = job_complete_sync(&job->job, &error_abort);
52
--
41
--
53
2.17.1
42
2.14.3
54
43
55
44
1
From: John Snow <jsnow@redhat.com>
1
Currently there is no easy way for iotests to ensure that a BDS is bound
2
to a particular IOThread. Normally the virtio-blk device calls
3
blk_set_aio_context() when dataplane is enabled during guest driver
4
initialization. This never happens in iotests since -machine
5
accel=qtest means there is no guest activity (including device driver
6
initialization).
2
7
3
Signed-off-by: John Snow <jsnow@redhat.com>
8
This patch adds a QMP command to explicitly assign IOThreads in test
4
Reviewed-by: Max Reitz <mreitz@redhat.com>
9
cases. See qapi/block-core.json for a description of the command.
5
Message-id: 20180906130225.5118-15-jsnow@redhat.com
10
6
Reviewed-by: Jeff Cody <jcody@redhat.com>
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Signed-off-by: Max Reitz <mreitz@redhat.com>
12
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
13
Reviewed-by: Eric Blake <eblake@redhat.com>
14
Message-id: 20171206144550.22295-9-stefanha@redhat.com
15
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
---
16
---
9
qapi/block-core.json | 16 +++++++++++++++-
17
qapi/block-core.json | 36 ++++++++++++++++++++++++++++++++++++
10
blockdev.c | 9 +++++++++
18
blockdev.c | 41 +++++++++++++++++++++++++++++++++++++++++
11
hmp.c | 5 +++--
19
2 files changed, 77 insertions(+)
12
3 files changed, 27 insertions(+), 3 deletions(-)
13
20
14
diff --git a/qapi/block-core.json b/qapi/block-core.json
21
diff --git a/qapi/block-core.json b/qapi/block-core.json
15
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
16
--- a/qapi/block-core.json
23
--- a/qapi/block-core.json
17
+++ b/qapi/block-core.json
24
+++ b/qapi/block-core.json
18
@@ -XXX,XX +XXX,XX @@
25
@@ -XXX,XX +XXX,XX @@
19
# 'stop' and 'enospc' can only be used if the block device
26
'data' : { 'parent': 'str',
20
# supports io-status (see BlockInfo). Since 1.3.
27
'*child': 'str',
21
#
28
'*node': 'str' } }
22
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
29
+
23
+# finished its work, waiting for @block-job-finalize before
30
+##
24
+# making any block graph changes.
31
+# @x-blockdev-set-iothread:
25
+# When true, this job will automatically
26
+# perform its abort or commit actions.
27
+# Defaults to true. (Since 3.1)
28
+#
32
+#
29
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
33
+# Move @node and its children into the @iothread. If @iothread is null then
30
+# has completely ceased all work, and awaits @block-job-dismiss.
34
+# move @node and its children into the main loop.
31
+# When true, this job will automatically disappear from the query
32
+# list without user intervention.
33
+# Defaults to true. (Since 3.1)
34
+#
35
+#
35
# Returns: Nothing on success. If @device does not exist, DeviceNotFound.
36
+# The node must not be attached to a BlockBackend.
36
#
37
+#
37
# Since: 1.1
38
+# @node-name: the name of the block driver node
38
@@ -XXX,XX +XXX,XX @@
39
+#
39
{ 'command': 'block-stream',
40
+# @iothread: the name of the IOThread object or null for the main loop
40
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str',
41
+#
41
'*base-node': 'str', '*backing-file': 'str', '*speed': 'int',
42
+# Note: this command is experimental and intended for test cases that need
42
- '*on-error': 'BlockdevOnError' } }
43
+# control over IOThreads only.
43
+ '*on-error': 'BlockdevOnError',
44
+#
44
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
45
+# Since: 2.12
45
46
+#
46
##
47
+# Example:
47
# @block-job-set-speed:
48
+#
49
+# 1. Move a node into an IOThread
50
+# -> { "execute": "x-blockdev-set-iothread",
51
+# "arguments": { "node-name": "disk1",
52
+# "iothread": "iothread0" } }
53
+# <- { "return": {} }
54
+#
55
+# 2. Move a node into the main loop
56
+# -> { "execute": "x-blockdev-set-iothread",
57
+# "arguments": { "node-name": "disk1",
58
+# "iothread": null } }
59
+# <- { "return": {} }
60
+#
61
+##
62
+{ 'command': 'x-blockdev-set-iothread',
63
+ 'data' : { 'node-name': 'str',
64
+ 'iothread': 'StrOrNull' } }
48
diff --git a/blockdev.c b/blockdev.c
65
diff --git a/blockdev.c b/blockdev.c
49
index XXXXXXX..XXXXXXX 100644
66
index XXXXXXX..XXXXXXX 100644
50
--- a/blockdev.c
67
--- a/blockdev.c
51
+++ b/blockdev.c
68
+++ b/blockdev.c
52
@@ -XXX,XX +XXX,XX @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
69
@@ -XXX,XX +XXX,XX @@
53
bool has_backing_file, const char *backing_file,
70
#include "qapi/qmp/qerror.h"
54
bool has_speed, int64_t speed,
71
#include "qapi/qobject-output-visitor.h"
55
bool has_on_error, BlockdevOnError on_error,
72
#include "sysemu/sysemu.h"
56
+ bool has_auto_finalize, bool auto_finalize,
73
+#include "sysemu/iothread.h"
57
+ bool has_auto_dismiss, bool auto_dismiss,
74
#include "block/block_int.h"
58
Error **errp)
75
#include "qmp-commands.h"
59
{
76
#include "block/trace.h"
60
BlockDriverState *bs, *iter;
77
@@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
61
@@ -XXX,XX +XXX,XX @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
78
return head;
62
/* backing_file string overrides base bs filename */
79
}
63
base_name = has_backing_file ? backing_file : base_name;
80
64
81
+void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
65
+ if (has_auto_finalize && !auto_finalize) {
82
+ Error **errp)
66
+ job_flags |= JOB_MANUAL_FINALIZE;
83
+{
67
+ }
84
+ AioContext *old_context;
68
+ if (has_auto_dismiss && !auto_dismiss) {
85
+ AioContext *new_context;
69
+ job_flags |= JOB_MANUAL_DISMISS;
86
+ BlockDriverState *bs;
87
+
88
+ bs = bdrv_find_node(node_name);
89
+ if (!bs) {
90
+ error_setg(errp, "Cannot find node %s", node_name);
91
+ return;
70
+ }
92
+ }
71
+
93
+
72
stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name,
94
+ /* If we want to allow more extreme test scenarios this guard could be
73
job_flags, has_speed ? speed : 0, on_error, &local_err);
95
+ * removed. For now it protects against accidents. */
74
if (local_err) {
96
+ if (bdrv_has_blk(bs)) {
75
diff --git a/hmp.c b/hmp.c
97
+ error_setg(errp, "Node %s is in use", node_name);
76
index XXXXXXX..XXXXXXX 100644
98
+ return;
77
--- a/hmp.c
99
+ }
78
+++ b/hmp.c
100
+
79
@@ -XXX,XX +XXX,XX @@ void hmp_block_stream(Monitor *mon, const QDict *qdict)
101
+ if (iothread->type == QTYPE_QSTRING) {
80
int64_t speed = qdict_get_try_int(qdict, "speed", 0);
102
+ IOThread *obj = iothread_by_id(iothread->u.s);
81
103
+ if (!obj) {
82
qmp_block_stream(true, device, device, base != NULL, base, false, NULL,
104
+ error_setg(errp, "Cannot find iothread %s", iothread->u.s);
83
- false, NULL, qdict_haskey(qdict, "speed"), speed,
105
+ return;
84
- true, BLOCKDEV_ON_ERROR_REPORT, &error);
106
+ }
85
+ false, NULL, qdict_haskey(qdict, "speed"), speed, true,
107
+
86
+ BLOCKDEV_ON_ERROR_REPORT, false, false, false, false,
108
+ new_context = iothread_get_aio_context(obj);
87
+ &error);
109
+ } else {
88
110
+ new_context = qemu_get_aio_context();
89
hmp_handle_error(mon, &error);
111
+ }
90
}
112
+
113
+ old_context = bdrv_get_aio_context(bs);
114
+ aio_context_acquire(old_context);
115
+
116
+ bdrv_set_aio_context(bs, new_context);
117
+
118
+ aio_context_release(old_context);
119
+}
120
+
121
QemuOptsList qemu_common_drive_opts = {
122
.name = "drive",
123
.head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
91
--
124
--
92
2.17.1
125
2.14.3
93
126
94
127
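With the new block-stream flags, a management application can hold the job in
its PENDING and CONCLUDED states and drive the final steps explicitly. A
hedged example of the flow (node and job names are made up):

  -> { "execute": "block-stream",
       "arguments": { "job-id": "stream0", "device": "node0",
                      "auto-finalize": false, "auto-dismiss": false } }
  <- { "return": {} }

  ... the job runs, then waits in PENDING ...

  -> { "execute": "block-job-finalize", "arguments": { "id": "stream0" } }
  <- { "return": {} }
  -> { "execute": "block-job-dismiss", "arguments": { "id": "stream0" } }
  <- { "return": {} }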
1
From: Kevin Wolf <kwolf@redhat.com>
1
QMP 'transaction' blockdev-snapshot-sync with multiple disks in an
2
IOThread is an untested code path. Several bugs have been found in
3
connection with this command. This patch adds a test case to prevent
4
future regressions.
2
5
3
This is a regression test for a deadlock that could occur in callbacks
6
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
called from the aio_poll() in bdrv_drain_poll_top_level(). The
7
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
5
AioContext lock wasn't released and therefore would be taken a second
8
Reviewed-by: Eric Blake <eblake@redhat.com>
6
time in the callback. This would cause a possible AIO_WAIT_WHILE() in
9
Message-id: 20171206144550.22295-10-stefanha@redhat.com
7
the callback to hang.
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
12
tests/qemu-iotests/202 | 95 ++++++++++++++++++++++++++++++++++++++++++++++
13
tests/qemu-iotests/202.out | 11 ++++++
14
tests/qemu-iotests/group | 1 +
15
3 files changed, 107 insertions(+)
16
create mode 100755 tests/qemu-iotests/202
17
create mode 100644 tests/qemu-iotests/202.out
8
18
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
19
diff --git a/tests/qemu-iotests/202 b/tests/qemu-iotests/202
10
Reviewed-by: Fam Zheng <famz@redhat.com>
20
new file mode 100755
11
---
21
index XXXXXXX..XXXXXXX
12
tests/test-bdrv-drain.c | 13 +++++++++++++
22
--- /dev/null
13
1 file changed, 13 insertions(+)
23
+++ b/tests/qemu-iotests/202
14
24
@@ -XXX,XX +XXX,XX @@
15
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
25
+#!/usr/bin/env python
26
+#
27
+# Copyright (C) 2017 Red Hat, Inc.
28
+#
29
+# This program is free software; you can redistribute it and/or modify
30
+# it under the terms of the GNU General Public License as published by
31
+# the Free Software Foundation; either version 2 of the License, or
32
+# (at your option) any later version.
33
+#
34
+# This program is distributed in the hope that it will be useful,
35
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
36
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
37
+# GNU General Public License for more details.
38
+#
39
+# You should have received a copy of the GNU General Public License
40
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
41
+#
42
+# Creator/Owner: Stefan Hajnoczi <stefanha@redhat.com>
43
+#
44
+# Check that QMP 'transaction' blockdev-snapshot-sync with multiple drives on a
45
+# single IOThread completes successfully. This particular command triggered a
46
+# hang due to recursive AioContext locking and BDRV_POLL_WHILE(). Protect
47
+# against regressions.
48
+
49
+import iotests
50
+
51
+iotests.verify_image_format(supported_fmts=['qcow2'])
52
+iotests.verify_platform(['linux'])
53
+
54
+with iotests.FilePath('disk0.img') as disk0_img_path, \
55
+ iotests.FilePath('disk1.img') as disk1_img_path, \
56
+ iotests.FilePath('disk0-snap.img') as disk0_snap_img_path, \
57
+ iotests.FilePath('disk1-snap.img') as disk1_snap_img_path, \
58
+ iotests.VM() as vm:
59
+
60
+ img_size = '10M'
61
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk0_img_path, img_size)
62
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk1_img_path, img_size)
63
+
64
+ iotests.log('Launching VM...')
65
+ vm.launch()
66
+
67
+ iotests.log('Adding IOThread...')
68
+ iotests.log(vm.qmp('object-add',
69
+ qom_type='iothread',
70
+ id='iothread0'))
71
+
72
+ iotests.log('Adding blockdevs...')
73
+ iotests.log(vm.qmp('blockdev-add',
74
+ driver=iotests.imgfmt,
75
+ node_name='disk0',
76
+ file={
77
+ 'driver': 'file',
78
+ 'filename': disk0_img_path,
79
+ }))
80
+ iotests.log(vm.qmp('blockdev-add',
81
+ driver=iotests.imgfmt,
82
+ node_name='disk1',
83
+ file={
84
+ 'driver': 'file',
85
+ 'filename': disk1_img_path,
86
+ }))
87
+
88
+ iotests.log('Setting iothread...')
89
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
90
+ node_name='disk0',
91
+ iothread='iothread0'))
92
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
93
+ node_name='disk1',
94
+ iothread='iothread0'))
95
+
96
+ iotests.log('Creating external snapshots...')
97
+ iotests.log(vm.qmp(
98
+ 'transaction',
99
+ actions=[
100
+ {
101
+ 'data': {
102
+ 'node-name': 'disk0',
103
+ 'snapshot-file': disk0_snap_img_path,
104
+ 'snapshot-node-name': 'disk0-snap',
105
+ 'mode': 'absolute-paths',
106
+ 'format': iotests.imgfmt,
107
+ },
108
+ 'type': 'blockdev-snapshot-sync'
109
+ }, {
110
+ 'data': {
111
+ 'node-name': 'disk1',
112
+ 'snapshot-file': disk1_snap_img_path,
113
+ 'snapshot-node-name': 'disk1-snap',
114
+ 'mode': 'absolute-paths',
115
+ 'format': iotests.imgfmt
116
+ },
117
+ 'type': 'blockdev-snapshot-sync'
118
+ }
119
+ ]))
120
diff --git a/tests/qemu-iotests/202.out b/tests/qemu-iotests/202.out
121
new file mode 100644
122
index XXXXXXX..XXXXXXX
123
--- /dev/null
124
+++ b/tests/qemu-iotests/202.out
125
@@ -XXX,XX +XXX,XX @@
126
+Launching VM...
127
+Adding IOThread...
128
+{u'return': {}}
129
+Adding blockdevs...
130
+{u'return': {}}
131
+{u'return': {}}
132
+Setting iothread...
133
+{u'return': {}}
134
+{u'return': {}}
135
+Creating external snapshots...
136
+{u'return': {}}
137
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
16
index XXXXXXX..XXXXXXX 100644
138
index XXXXXXX..XXXXXXX 100644
17
--- a/tests/test-bdrv-drain.c
139
--- a/tests/qemu-iotests/group
18
+++ b/tests/test-bdrv-drain.c
140
+++ b/tests/qemu-iotests/group
19
@@ -XXX,XX +XXX,XX @@ static void test_iothread_aio_cb(void *opaque, int ret)
141
@@ -XXX,XX +XXX,XX @@
20
qemu_event_set(&done_event);
142
197 rw auto quick
21
}
143
198 rw auto
22
144
200 rw auto
23
+static void test_iothread_main_thread_bh(void *opaque)
145
+202 rw auto quick
24
+{
25
+ struct test_iothread_data *data = opaque;
26
+
27
+ /* Test that the AioContext is not yet locked in a random BH that is
28
+ * executed during drain, otherwise this would deadlock. */
29
+ aio_context_acquire(bdrv_get_aio_context(data->bs));
30
+ bdrv_flush(data->bs);
31
+ aio_context_release(bdrv_get_aio_context(data->bs));
32
+}
33
+
34
/*
35
* Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
36
* The request involves a BH on iothread 2 before it can complete.
37
@@ -XXX,XX +XXX,XX @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
38
aio_context_acquire(ctx_a);
39
}
40
41
+ aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);
42
+
43
/* The request is running on the IOThread a. Draining its block device
44
* will make sure that it has completed as far as the BDS is concerned,
45
* but the drain in this thread can continue immediately after
46
--
146
--
47
2.17.1
147
2.14.3
48
148
49
149
From: Kevin Wolf <kwolf@redhat.com>

This is a regression test for a deadlock that occurred in block job
completion callbacks (via job_defer_to_main_loop) because the AioContext
lock was taken twice: once in job_finish_sync() and then again in
job_defer_to_main_loop_bh(). This would cause AIO_WAIT_WHILE() to hang.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
---
tests/test-bdrv-drain.c | 10 ++++++++++
1 file changed, 10 insertions(+)

From: Mark Kanda <mark.kanda@oracle.com>

Depending on the configuration, it can be beneficial to adjust the virtio-blk
queue size to something other than the current default of 128. Add a new
property to make the queue size configurable.

Signed-off-by: Mark Kanda <mark.kanda@oracle.com>
Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Ameya More <ameya.more@oracle.com>
Message-id: 52e6d742811f10dbd16e996e86cf375b9577c187.1513005190.git.mark.kanda@oracle.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/hw/virtio/virtio-blk.h | 1 +
hw/block/virtio-blk.c | 10 +++++++++-
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
18
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
15
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/test-bdrv-drain.c
20
--- a/include/hw/virtio/virtio-blk.h
17
+++ b/tests/test-bdrv-drain.c
21
+++ b/include/hw/virtio/virtio-blk.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
22
@@ -XXX,XX +XXX,XX @@ struct VirtIOBlkConf
19
bool should_complete;
23
uint32_t config_wce;
20
} TestBlockJob;
24
uint32_t request_merging;
21
25
uint16_t num_queues;
22
+static int test_job_prepare(Job *job)
26
+ uint16_t queue_size;
23
+{
24
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
25
+
26
+ /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
27
+ blk_flush(s->common.blk);
28
+ return 0;
29
+}
30
+
31
static int coroutine_fn test_job_run(Job *job, Error **errp)
32
{
33
TestBlockJob *s = container_of(job, TestBlockJob, common.job);
34
@@ -XXX,XX +XXX,XX @@ BlockJobDriver test_job_driver = {
35
.drain = block_job_drain,
36
.run = test_job_run,
37
.complete = test_job_complete,
38
+ .prepare = test_job_prepare,
39
},
40
};
27
};
41
28
29
struct VirtIOBlockDataPlane;
30
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/hw/block/virtio-blk.c
33
+++ b/hw/block/virtio-blk.c
34
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
35
error_setg(errp, "num-queues property must be larger than 0");
36
return;
37
}
38
+ if (!is_power_of_2(conf->queue_size) ||
39
+ conf->queue_size > VIRTQUEUE_MAX_SIZE) {
40
+ error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
41
+ "must be a power of 2 (max %d)",
42
+ conf->queue_size, VIRTQUEUE_MAX_SIZE);
43
+ return;
44
+ }
45
46
blkconf_serial(&conf->conf, &conf->serial);
47
if (!blkconf_apply_backend_options(&conf->conf,
48
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
49
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
50
51
for (i = 0; i < conf->num_queues; i++) {
52
- virtio_add_queue(vdev, 128, virtio_blk_handle_output);
53
+ virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
54
}
55
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
56
if (err != NULL) {
57
@@ -XXX,XX +XXX,XX @@ static Property virtio_blk_properties[] = {
58
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
59
true),
60
DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
61
+ DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
62
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
63
IOThread *),
64
DEFINE_PROP_END_OF_LIST(),
42
--
65
--
43
2.17.1
66
2.14.3
44
67
45
68
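With the new property, the virtqueue size can be raised above the default of
128 on the command line. A hedged example (drive id and image are made up;
the value must be a power of 2 and no larger than VIRTQUEUE_MAX_SIZE):

  $ qemu-system-x86_64 ... \
      -drive if=none,id=drive0,file=test.img,format=raw \
      -device virtio-blk-pci,drive=drive0,queue-size=256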
1
From: John Snow <jsnow@redhat.com>
1
From: Mark Kanda <mark.kanda@oracle.com>
2
2
3
The exit callback in this test actually only performs cleanup.
3
virtio-blk logical block size should never be larger than physical block
4
size because it doesn't make sense to have such configurations. QEMU doesn't
5
have a way to effectively express this condition; the best it can do is
6
report the physical block exponent as 0 - indicating the logical block size
7
equals the physical block size.
4
8
5
Signed-off-by: John Snow <jsnow@redhat.com>
9
This is identical to commit 3da023b5827543ee4c022986ea2ad9d1274410b2
6
Reviewed-by: Max Reitz <mreitz@redhat.com>
10
but applied to virtio-blk (instead of virtio-scsi).
7
Message-id: 20180906130225.5118-11-jsnow@redhat.com
11
8
Reviewed-by: Jeff Cody <jcody@redhat.com>
12
Signed-off-by: Mark Kanda <mark.kanda@oracle.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
13
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
14
Reviewed-by: Ameya More <ameya.more@oracle.com>
15
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
16
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
17
Message-id: 773169891f9f2deb4cb7c4ef2655580dbe24c1d1.1513005190.git.mark.kanda@oracle.com
18
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
19
---
11
tests/test-blockjob-txn.c | 4 ++--
20
hw/block/virtio-blk.c | 7 +++++++
12
1 file changed, 2 insertions(+), 2 deletions(-)
21
1 file changed, 7 insertions(+)
13
22
14
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
23
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
15
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/test-blockjob-txn.c
25
--- a/hw/block/virtio-blk.c
17
+++ b/tests/test-blockjob-txn.c
26
+++ b/hw/block/virtio-blk.c
18
@@ -XXX,XX +XXX,XX @@ typedef struct {
27
@@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
19
int *result;
28
20
} TestBlockJob;
29
blkconf_blocksizes(&conf->conf);
21
30
22
-static void test_block_job_exit(Job *job)
31
+ if (conf->conf.logical_block_size >
23
+static void test_block_job_clean(Job *job)
32
+ conf->conf.physical_block_size) {
24
{
33
+ error_setg(errp,
25
BlockJob *bjob = container_of(job, BlockJob, job);
34
+ "logical_block_size > physical_block_size not supported");
26
BlockDriverState *bs = blk_bs(bjob->blk);
35
+ return;
27
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
36
+ }
28
.user_resume = block_job_user_resume,
37
+
29
.drain = block_job_drain,
38
virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
30
.run = test_block_job_run,
39
sizeof(struct virtio_blk_config));
31
- .exit = test_block_job_exit,
32
+ .clean = test_block_job_clean,
33
},
34
};
35
40
36
--
41
--
37
2.17.1
42
2.14.3
38
43
39
44
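After this check, a configuration whose logical block size exceeds the
physical block size is rejected at realize time. A hedged example (drive id is
made up; the exact error prefix printed by QEMU may differ): the first
invocation is accepted, the second now fails with the new message:

  $ qemu-system-x86_64 ... \
      -device virtio-blk-pci,drive=drive0,logical_block_size=512,physical_block_size=4096

  $ qemu-system-x86_64 ... \
      -device virtio-blk-pci,drive=drive0,logical_block_size=4096,physical_block_size=512
  logical_block_size > physical_block_size not supported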
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
Now that all of the jobs use the component finalization callbacks,
4
there's no use for the heavy-hammer .exit callback anymore.
5
6
job_exit becomes a glorified type shim so that we can call
7
job_completed from aio_bh_schedule_oneshot.
8
9
Move these three functions down into job.c to eliminate a
10
forward reference.
11
12
Signed-off-by: John Snow <jsnow@redhat.com>
13
Reviewed-by: Max Reitz <mreitz@redhat.com>
14
Message-id: 20180906130225.5118-12-jsnow@redhat.com
15
Reviewed-by: Jeff Cody <jcody@redhat.com>
16
Signed-off-by: Max Reitz <mreitz@redhat.com>
17
---
18
include/qemu/job.h | 11 -------
19
job.c | 77 ++++++++++++++++++++--------------------------
20
2 files changed, 34 insertions(+), 54 deletions(-)
21
22
diff --git a/include/qemu/job.h b/include/qemu/job.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/include/qemu/job.h
25
+++ b/include/qemu/job.h
26
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
27
*/
28
void (*drain)(Job *job);
29
30
- /**
31
- * If the callback is not NULL, exit will be invoked from the main thread
32
- * when the job's coroutine has finished, but before transactional
33
- * convergence; before @prepare or @abort.
34
- *
35
- * FIXME TODO: This callback is only temporary to transition remaining jobs
36
- * to prepare/commit/abort/clean callbacks and will be removed before 3.1.
37
- * is released.
38
- */
39
- void (*exit)(Job *job);
40
-
41
/**
42
* If the callback is not NULL, prepare will be invoked when all the jobs
43
* belonging to the same transaction complete; or upon this job's completion
44
diff --git a/job.c b/job.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/job.c
47
+++ b/job.c
48
@@ -XXX,XX +XXX,XX @@ void job_drain(Job *job)
49
}
50
}
51
52
-static void job_completed(Job *job);
53
-
54
-static void job_exit(void *opaque)
55
-{
56
- Job *job = (Job *)opaque;
57
- AioContext *aio_context = job->aio_context;
58
-
59
- if (job->driver->exit) {
60
- aio_context_acquire(aio_context);
61
- job->driver->exit(job);
62
- aio_context_release(aio_context);
63
- }
64
- job_completed(job);
65
-}
66
-
67
-/**
68
- * All jobs must allow a pause point before entering their job proper. This
69
- * ensures that jobs can be paused prior to being started, then resumed later.
70
- */
71
-static void coroutine_fn job_co_entry(void *opaque)
72
-{
73
- Job *job = opaque;
74
-
75
- assert(job && job->driver && job->driver->run);
76
- job_pause_point(job);
77
- job->ret = job->driver->run(job, &job->err);
78
- job->deferred_to_main_loop = true;
79
- aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
80
-}
81
-
82
-
83
-void job_start(Job *job)
84
-{
85
- assert(job && !job_started(job) && job->paused &&
86
- job->driver && job->driver->run);
87
- job->co = qemu_coroutine_create(job_co_entry, job);
88
- job->pause_count--;
89
- job->busy = true;
90
- job->paused = false;
91
- job_state_transition(job, JOB_STATUS_RUNNING);
92
- aio_co_enter(job->aio_context, job->co);
93
-}
94
-
95
/* Assumes the block_job_mutex is held */
96
static bool job_timer_not_pending(Job *job)
97
{
98
@@ -XXX,XX +XXX,XX @@ static void job_completed(Job *job)
99
}
100
}
101
102
+/** Useful only as a type shim for aio_bh_schedule_oneshot. */
103
+static void job_exit(void *opaque)
104
+{
105
+ Job *job = (Job *)opaque;
106
+ job_completed(job);
107
+}
108
+
109
+/**
110
+ * All jobs must allow a pause point before entering their job proper. This
111
+ * ensures that jobs can be paused prior to being started, then resumed later.
112
+ */
113
+static void coroutine_fn job_co_entry(void *opaque)
114
+{
115
+ Job *job = opaque;
116
+
117
+ assert(job && job->driver && job->driver->run);
118
+ job_pause_point(job);
119
+ job->ret = job->driver->run(job, &job->err);
120
+ job->deferred_to_main_loop = true;
121
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
122
+}
123
+
124
+void job_start(Job *job)
125
+{
126
+ assert(job && !job_started(job) && job->paused &&
127
+ job->driver && job->driver->run);
128
+ job->co = qemu_coroutine_create(job_co_entry, job);
129
+ job->pause_count--;
130
+ job->busy = true;
131
+ job->paused = false;
132
+ job_state_transition(job, JOB_STATUS_RUNNING);
133
+ aio_co_enter(job->aio_context, job->co);
134
+}
135
+
136
void job_cancel(Job *job, bool force)
137
{
138
if (job->status == JOB_STATUS_CONCLUDED) {
139
--
140
2.17.1
141
142
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
Signed-off-by: John Snow <jsnow@redhat.com>
4
Reviewed-by: Max Reitz <mreitz@redhat.com>
5
Message-id: 20180906130225.5118-13-jsnow@redhat.com
6
Reviewed-by: Jeff Cody <jcody@redhat.com>
7
Signed-off-by: Max Reitz <mreitz@redhat.com>
8
---
9
qapi/block-core.json | 16 +++++++++++++++-
10
blockdev.c | 8 ++++++++
11
2 files changed, 23 insertions(+), 1 deletion(-)
12
13
diff --git a/qapi/block-core.json b/qapi/block-core.json
14
index XXXXXXX..XXXXXXX 100644
15
--- a/qapi/block-core.json
16
+++ b/qapi/block-core.json
17
@@ -XXX,XX +XXX,XX @@
18
# above @top. If this option is not given, a node name is
19
# autogenerated. (Since: 2.9)
20
#
21
+# @auto-finalize: When false, this job will wait in a PENDING state after it has
22
+# finished its work, waiting for @block-job-finalize before
23
+# making any block graph changes.
24
+# When true, this job will automatically
25
+# perform its abort or commit actions.
26
+# Defaults to true. (Since 3.1)
27
+#
28
+# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
29
+# has completely ceased all work, and awaits @block-job-dismiss.
30
+# When true, this job will automatically disappear from the query
31
+# list without user intervention.
32
+# Defaults to true. (Since 3.1)
33
+#
34
# Returns: Nothing on success
35
# If @device does not exist, DeviceNotFound
36
# Any other error returns a GenericError.
37
@@ -XXX,XX +XXX,XX @@
38
{ 'command': 'block-commit',
39
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', '*top': 'str',
40
'*backing-file': 'str', '*speed': 'int',
41
- '*filter-node-name': 'str' } }
42
+ '*filter-node-name': 'str',
43
+ '*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
44
45
##
46
# @drive-backup:
47
diff --git a/blockdev.c b/blockdev.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/blockdev.c
50
+++ b/blockdev.c
51
@@ -XXX,XX +XXX,XX @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
52
bool has_backing_file, const char *backing_file,
53
bool has_speed, int64_t speed,
54
bool has_filter_node_name, const char *filter_node_name,
55
+ bool has_auto_finalize, bool auto_finalize,
56
+ bool has_auto_dismiss, bool auto_dismiss,
57
Error **errp)
58
{
59
BlockDriverState *bs;
60
@@ -XXX,XX +XXX,XX @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
61
if (!has_filter_node_name) {
62
filter_node_name = NULL;
63
}
64
+ if (has_auto_finalize && !auto_finalize) {
65
+ job_flags |= JOB_MANUAL_FINALIZE;
66
+ }
67
+ if (has_auto_dismiss && !auto_dismiss) {
68
+ job_flags |= JOB_MANUAL_DISMISS;
69
+ }
70
71
/* Important Note:
72
* libvirt relies on the DeviceNotFound error class in order to probe for
73
--
74
2.17.1
75
76
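As with the other jobs amended for 3.1, block-commit can now be held at its
final steps instead of finalizing and disappearing automatically. A hedged
example (device and job names are made up):

  -> { "execute": "block-commit",
       "arguments": { "job-id": "commit0", "device": "drive0",
                      "auto-finalize": false, "auto-dismiss": false } }
  <- { "return": {} }

  -> { "execute": "block-job-finalize", "arguments": { "id": "commit0" } }
  <- { "return": {} }
  -> { "execute": "block-job-dismiss", "arguments": { "id": "commit0" } }
  <- { "return": {} }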
Deleted patch
1
From: John Snow <jsnow@redhat.com>
2
1
3
Fix documentation to match the other jobs amended for 3.1.
4
5
Signed-off-by: John Snow <jsnow@redhat.com>
6
Reviewed-by: Max Reitz <mreitz@redhat.com>
7
Message-id: 20180906130225.5118-16-jsnow@redhat.com
8
Reviewed-by: Jeff Cody <jcody@redhat.com>
9
Signed-off-by: Max Reitz <mreitz@redhat.com>
10
---
11
qapi/block-core.json | 18 ++++++++++--------
12
1 file changed, 10 insertions(+), 8 deletions(-)
13
14
diff --git a/qapi/block-core.json b/qapi/block-core.json
15
index XXXXXXX..XXXXXXX 100644
16
--- a/qapi/block-core.json
17
+++ b/qapi/block-core.json
18
@@ -XXX,XX +XXX,XX @@
19
# a different block device than @device).
20
#
21
# @auto-finalize: When false, this job will wait in a PENDING state after it has
22
-# finished its work, waiting for @block-job-finalize.
23
-# When true, this job will automatically perform its abort or
24
-# commit actions.
25
+# finished its work, waiting for @block-job-finalize before
26
+# making any block graph changes.
27
+# When true, this job will automatically
28
+# perform its abort or commit actions.
29
# Defaults to true. (Since 2.12)
30
#
31
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
32
-# has completed ceased all work, and wait for @block-job-dismiss.
33
+# has completely ceased all work, and awaits @block-job-dismiss.
34
# When true, this job will automatically disappear from the query
35
# list without user intervention.
36
# Defaults to true. (Since 2.12)
37
@@ -XXX,XX +XXX,XX @@
38
# a different block device than @device).
39
#
40
# @auto-finalize: When false, this job will wait in a PENDING state after it has
41
-# finished its work, waiting for @block-job-finalize.
42
-# When true, this job will automatically perform its abort or
43
-# commit actions.
44
+# finished its work, waiting for @block-job-finalize before
45
+# making any block graph changes.
46
+# When true, this job will automatically
47
+# perform its abort or commit actions.
48
# Defaults to true. (Since 2.12)
49
#
50
# @auto-dismiss: When false, this job will wait in a CONCLUDED state after it
51
-# has completed ceased all work, and wait for @block-job-dismiss.
52
+# has completely ceased all work, and awaits @block-job-dismiss.
53
# When true, this job will automatically disappear from the query
54
# list without user intervention.
55
# Defaults to true. (Since 2.12)
56
--
57
2.17.1
58
59
From: Alberto Garcia <berto@igalia.com>

When a block device is opened with BDRV_O_SNAPSHOT and the
bdrv_append_temp_snapshot() call fails then the error code path tries
to unref the already destroyed 'options' QDict.

This can be reproduced easily by setting TMPDIR to a location where
the QEMU process can't write:

$ TMPDIR=/nonexistent $QEMU -drive driver=null-co,snapshot=on

Signed-off-by: Alberto Garcia <berto@igalia.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block.c | 1 +
1 file changed, 1 insertion(+)

From: Paolo Bonzini <pbonzini@redhat.com>

BDRV_POLL_WHILE() does not support recursive AioContext locking. It
only releases the AioContext lock once regardless of how many times the
caller has acquired it. This results in a hang since the IOThread does
not make progress while the AioContext is still locked.

The following steps trigger the hang:

$ qemu-system-x86_64 -M accel=kvm -m 1G -cpu host \
    -object iothread,id=iothread0 \
    -device virtio-scsi-pci,iothread=iothread0 \
    -drive if=none,id=drive0,file=test.img,format=raw \
    -device scsi-hd,drive=drive0 \
    -drive if=none,id=drive1,file=test.img,format=raw \
    -device scsi-hd,drive=drive1
$ qemu-system-x86_64 ...same options... \
    -incoming tcp::1234
(qemu) migrate tcp:127.0.0.1:1234
...hang...

Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-id: 20171207201320.19284-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
block.c | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/block.c b/block.c
31
diff --git a/block.c b/block.c
19
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
20
--- a/block.c
33
--- a/block.c
21
+++ b/block.c
34
+++ b/block.c
22
@@ -XXX,XX +XXX,XX @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
35
@@ -XXX,XX +XXX,XX @@ int bdrv_inactivate_all(void)
23
bdrv_parent_cb_change_media(bs, true);
36
BdrvNextIterator it;
24
37
int ret = 0;
25
qobject_unref(options);
38
int pass;
26
+ options = NULL;
39
+ GSList *aio_ctxs = NULL, *ctx;
27
40
28
/* For snapshot=on, create a temporary qcow2 overlay. bs points to the
41
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
29
* temporary snapshot afterwards. */
42
- aio_context_acquire(bdrv_get_aio_context(bs));
43
+ AioContext *aio_context = bdrv_get_aio_context(bs);
44
+
45
+ if (!g_slist_find(aio_ctxs, aio_context)) {
46
+ aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
47
+ aio_context_acquire(aio_context);
48
+ }
49
}
50
51
/* We do two passes of inactivation. The first pass calls to drivers'
52
@@ -XXX,XX +XXX,XX @@ int bdrv_inactivate_all(void)
53
}
54
55
out:
56
- for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
57
- aio_context_release(bdrv_get_aio_context(bs));
58
+ for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
59
+ AioContext *aio_context = ctx->data;
60
+ aio_context_release(aio_context);
61
}
62
+ g_slist_free(aio_ctxs);
63
64
return ret;
65
}
30
--
66
--
31
2.17.1
67
2.14.3
32
68
33
69
1
From: Kevin Wolf <kwolf@redhat.com>
1
See the patch for why nested AioContext locking is no longer allowed.
2
2
3
bdrv_drain_poll_top_level() was buggy because it didn't release the
3
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
AioContext lock of the node to be drained before calling aio_poll().
4
Reviewed-by: Eric Blake <eblake@redhat.com>
5
This way, callbacks called by aio_poll() would possibly take the lock a
5
Message-id: 20171207201320.19284-3-stefanha@redhat.com
6
second time and run into a deadlock with a nested AIO_WAIT_WHILE() call.
6
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
7
---
8
docs/devel/multiple-iothreads.txt | 7 ++++---
9
1 file changed, 4 insertions(+), 3 deletions(-)
7
10
8
However, it turns out that the aio_poll() call isn't actually needed any
11
diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt
9
more. It was introduced in commit 91af091f923, which is effectively
10
reverted by this patch. The cases it was supposed to fix are now covered
11
by bdrv_drain_poll(), which waits for block jobs to reach a quiescent
12
state.
13
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
Reviewed-by: Fam Zheng <famz@redhat.com>
16
Reviewed-by: Max Reitz <mreitz@redhat.com>
17
---
18
block/io.c | 8 --------
19
1 file changed, 8 deletions(-)
20
21
diff --git a/block/io.c b/block/io.c
22
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
23
--- a/block/io.c
13
--- a/docs/devel/multiple-iothreads.txt
24
+++ b/block/io.c
14
+++ b/docs/devel/multiple-iothreads.txt
25
@@ -XXX,XX +XXX,XX @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
15
@@ -XXX,XX +XXX,XX @@
26
static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
16
-Copyright (c) 2014 Red Hat Inc.
27
BdrvChild *ignore_parent)
17
+Copyright (c) 2014-2017 Red Hat Inc.
28
{
18
29
- /* Execute pending BHs first and check everything else only after the BHs
19
This work is licensed under the terms of the GNU GPL, version 2 or later. See
30
- * have executed. */
20
the COPYING file in the top-level directory.
31
- while (aio_poll(bs->aio_context, false));
21
@@ -XXX,XX +XXX,XX @@ aio_context_acquire()/aio_context_release() for mutual exclusion. Once the
32
-
22
context is acquired no other thread can access it or run event loop iterations
33
return bdrv_drain_poll(bs, recursive, ignore_parent, false);
23
in this AioContext.
34
}
24
35
25
-aio_context_acquire()/aio_context_release() calls may be nested. This
36
@@ -XXX,XX +XXX,XX @@ static bool bdrv_drain_all_poll(void)
26
-means you can call them if you're not sure whether #2 applies.
37
BlockDriverState *bs = NULL;
27
+Legacy code sometimes nests aio_context_acquire()/aio_context_release() calls.
38
bool result = false;
28
+Do not use nesting anymore, it is incompatible with the BDRV_POLL_WHILE() macro
39
29
+used in the block layer and can lead to hangs.
40
- /* Execute pending BHs first (may modify the graph) and check everything
30
41
- * else only after the BHs have executed. */
31
There is currently no lock ordering rule if a thread needs to acquire multiple
42
- while (aio_poll(qemu_get_aio_context(), false));
32
AioContexts simultaneously. Therefore, it is only safe for code holding the
43
-
44
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
45
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
46
while ((bs = bdrv_next_all_states(bs))) {
47
--
33
--
48
2.17.1
34
2.14.3
49
35
50
36
1
From: Kevin Wolf <kwolf@redhat.com>
1
When a node is already associated with a BlockBackend the
2
x-blockdev-set-iothread command refuses to set the IOThread. This is to
3
prevent accidentally changing the IOThread when the nodes are in use.
2
4
3
The block-commit QMP command required specifying the top and base nodes
5
When the nodes are created with -drive they automatically get a
4
of the commit jobs using the file name of that node. While this works
6
BlockBackend. In that case we know nothing is using them yet and it's
5
in simple cases (local files with absolute paths), the file names
7
safe to set the IOThread. Add a force boolean to override the check.
6
generated for more complicated setups can be hard to predict.
7
8
8
The block-commit command has more problems than just this, so we want to
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
replace it altogether in the long run, but libvirt needs a reliable way
10
Reviewed-by: Eric Blake <eblake@redhat.com>
10
to address nodes now. So we don't want to wait for a new, cleaner
11
Message-id: 20171207201320.19284-4-stefanha@redhat.com
11
command, but just add the minimal thing needed right now.
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
13
This adds two new options top-node and base-node to the command, which
14
allow specifying node names instead. They are mutually exclusive with
15
the old options.
16
17
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
18
---
13
---
19
qapi/block-core.json | 24 ++++++++++++++++++------
14
qapi/block-core.json | 6 +++++-
20
blockdev.c | 32 ++++++++++++++++++++++++++++++--
15
blockdev.c | 11 ++++++-----
21
2 files changed, 48 insertions(+), 8 deletions(-)
16
2 files changed, 11 insertions(+), 6 deletions(-)
22
17
23
diff --git a/qapi/block-core.json b/qapi/block-core.json
18
diff --git a/qapi/block-core.json b/qapi/block-core.json
24
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
25
--- a/qapi/block-core.json
20
--- a/qapi/block-core.json
26
+++ b/qapi/block-core.json
21
+++ b/qapi/block-core.json
27
@@ -XXX,XX +XXX,XX @@
22
@@ -XXX,XX +XXX,XX @@
28
#
23
#
29
# @device: the device name or node-name of a root node
24
# @iothread: the name of the IOThread object or null for the main loop
30
#
25
#
31
-# @base: The file name of the backing image to write data into.
26
+# @force: true if the node and its children should be moved when a BlockBackend
32
-# If not specified, this is the deepest backing image.
27
+# is already attached
33
+# @base-node: The node name of the backing image to write data into.
28
+#
34
+# If not specified, this is the deepest backing image.
29
# Note: this command is experimental and intended for test cases that need
35
+# (since: 3.1)
30
# control over IOThreads only.
36
#
31
#
37
-# @top: The file name of the backing image within the image chain,
38
-# which contains the topmost data to be committed down. If
39
-# not specified, this is the active layer.
40
+# @base: Same as @base-node, except that it is a file name rather than a node
41
+# name. This must be the exact filename string that was used to open the
42
+# node; other strings, even if addressing the same file, are not
43
+# accepted (deprecated, use @base-node instead)
44
+#
45
+# @top-node: The node name of the backing image within the image chain
46
+# which contains the topmost data to be committed down. If
47
+# not specified, this is the active layer. (since: 3.1)
48
+#
49
+# @top: Same as @top-node, except that it is a file name rather than a node
50
+# name. This must be the exact filename string that was used to open the
51
+# node; other strings, even if addressing the same file, are not
52
+# accepted (deprecated, use @top-node instead)
53
#
54
# @backing-file: The backing file string to write into the overlay
55
# image of 'top'. If 'top' is the active layer,
56
@@ -XXX,XX +XXX,XX @@
32
@@ -XXX,XX +XXX,XX @@
57
#
58
##
33
##
59
{ 'command': 'block-commit',
34
{ 'command': 'x-blockdev-set-iothread',
60
- 'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', '*top': 'str',
35
'data' : { 'node-name': 'str',
61
+ 'data': { '*job-id': 'str', 'device': 'str', '*base-node': 'str',
36
- 'iothread': 'StrOrNull' } }
62
+ '*base': 'str', '*top-node': 'str', '*top': 'str',
37
+ 'iothread': 'StrOrNull',
63
'*backing-file': 'str', '*speed': 'int',
38
+ '*force': 'bool' } }
64
'*filter-node-name': 'str',
65
'*auto-finalize': 'bool', '*auto-dismiss': 'bool' } }
66
diff --git a/blockdev.c b/blockdev.c
39
diff --git a/blockdev.c b/blockdev.c
67
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
68
--- a/blockdev.c
41
--- a/blockdev.c
69
+++ b/blockdev.c
42
+++ b/blockdev.c
70
@@ -XXX,XX +XXX,XX @@ out:
43
@@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
71
}
44
}
72
45
73
void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
46
void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
74
+ bool has_base_node, const char *base_node,
47
- Error **errp)
75
bool has_base, const char *base,
48
+ bool has_force, bool force, Error **errp)
76
+ bool has_top_node, const char *top_node,
49
{
77
bool has_top, const char *top,
50
AioContext *old_context;
78
bool has_backing_file, const char *backing_file,
51
AioContext *new_context;
79
bool has_speed, int64_t speed,
52
@@ -XXX,XX +XXX,XX @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
80
@@ -XXX,XX +XXX,XX @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
53
return;
81
/* default top_bs is the active layer */
54
}
82
top_bs = bs;
55
83
56
- /* If we want to allow more extreme test scenarios this guard could be
84
- if (has_top && top) {
57
- * removed. For now it protects against accidents. */
85
+ if (has_top_node && has_top) {
58
- if (bdrv_has_blk(bs)) {
86
+ error_setg(errp, "'top-node' and 'top' are mutually exclusive");
59
- error_setg(errp, "Node %s is in use", node_name);
87
+ goto out;
60
+ /* Protects against accidents. */
88
+ } else if (has_top_node) {
61
+ if (!(has_force && force) && bdrv_has_blk(bs)) {
89
+ top_bs = bdrv_lookup_bs(NULL, top_node, errp);
62
+ error_setg(errp, "Node %s is associated with a BlockBackend and could "
90
+ if (top_bs == NULL) {
63
+ "be in use (use force=true to override this check)",
91
+ goto out;
64
+ node_name);
92
+ }
65
return;
93
+ if (!bdrv_chain_contains(bs, top_bs)) {
66
}
94
+ error_setg(errp, "'%s' is not in this backing file chain",
67
95
+ top_node);
96
+ goto out;
97
+ }
98
+ } else if (has_top && top) {
99
if (strcmp(bs->filename, top) != 0) {
100
top_bs = bdrv_find_backing_image(bs, top);
101
}
102
@@ -XXX,XX +XXX,XX @@ void qmp_block_commit(bool has_job_id, const char *job_id, const char *device,
103
104
assert(bdrv_get_aio_context(top_bs) == aio_context);
105
106
- if (has_base && base) {
107
+ if (has_base_node && has_base) {
108
+ error_setg(errp, "'base-node' and 'base' are mutually exclusive");
109
+ goto out;
110
+ } else if (has_base_node) {
111
+ base_bs = bdrv_lookup_bs(NULL, base_node, errp);
112
+ if (base_bs == NULL) {
113
+ goto out;
114
+ }
115
+ if (!bdrv_chain_contains(top_bs, base_bs)) {
116
+ error_setg(errp, "'%s' is not in this backing file chain",
117
+ base_node);
118
+ goto out;
119
+ }
120
+ } else if (has_base && base) {
121
base_bs = bdrv_find_backing_image(top_bs, base);
122
} else {
123
base_bs = bdrv_find_base(top_bs);
124
--
68
--
125
2.17.1
69
2.14.3
126
70
127
71
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
This adds some tests for block-commit with the new options top-node and
4
base-node (taking node names) instead of top and base (taking file
5
names).
6
7
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
8
---
9
tests/qemu-iotests/040 | 52 ++++++++++++++++++++++++++++++++++++--
10
tests/qemu-iotests/040.out | 4 +--
11
2 files changed, 52 insertions(+), 4 deletions(-)
12
13
diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040
14
index XXXXXXX..XXXXXXX 100755
15
--- a/tests/qemu-iotests/040
16
+++ b/tests/qemu-iotests/040
17
@@ -XXX,XX +XXX,XX @@ class ImageCommitTestCase(iotests.QMPTestCase):
18
self.assert_no_active_block_jobs()
19
self.vm.shutdown()
20
21
- def run_commit_test(self, top, base, need_ready=False):
22
+ def run_commit_test(self, top, base, need_ready=False, node_names=False):
23
self.assert_no_active_block_jobs()
24
- result = self.vm.qmp('block-commit', device='drive0', top=top, base=base)
25
+ if node_names:
26
+ result = self.vm.qmp('block-commit', device='drive0', top_node=top, base_node=base)
27
+ else:
28
+ result = self.vm.qmp('block-commit', device='drive0', top=top, base=base)
29
self.assert_qmp(result, 'return', {})
30
self.wait_for_complete(need_ready)
31
32
@@ -XXX,XX +XXX,XX @@ class TestSingleDrive(ImageCommitTestCase):
33
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
34
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
35
36
+ def test_commit_node(self):
37
+ self.run_commit_test("mid", "base", node_names=True)
38
+ self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
39
+ self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed"))
40
+
41
def test_device_not_found(self):
42
result = self.vm.qmp('block-commit', device='nonexistent', top='%s' % mid_img)
43
self.assert_qmp(result, 'error/class', 'DeviceNotFound')
44
@@ -XXX,XX +XXX,XX @@ class TestSingleDrive(ImageCommitTestCase):
45
self.assert_qmp(result, 'error/class', 'GenericError')
46
self.assert_qmp(result, 'error/desc', 'Base \'badfile\' not found')
47
48
+ def test_top_node_invalid(self):
49
+ self.assert_no_active_block_jobs()
50
+ result = self.vm.qmp('block-commit', device='drive0', top_node='badfile', base_node='base')
51
+ self.assert_qmp(result, 'error/class', 'GenericError')
52
+ self.assert_qmp(result, 'error/desc', "Cannot find device= nor node_name=badfile")
53
+
54
+ def test_base_node_invalid(self):
55
+ self.assert_no_active_block_jobs()
56
+ result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='badfile')
57
+ self.assert_qmp(result, 'error/class', 'GenericError')
58
+ self.assert_qmp(result, 'error/desc', "Cannot find device= nor node_name=badfile")
59
+
60
+ def test_top_path_and_node(self):
61
+ self.assert_no_active_block_jobs()
62
+ result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='base', top='%s' % mid_img)
63
+ self.assert_qmp(result, 'error/class', 'GenericError')
64
+ self.assert_qmp(result, 'error/desc', "'top-node' and 'top' are mutually exclusive")
65
+
66
+ def test_base_path_and_node(self):
67
+ self.assert_no_active_block_jobs()
68
+ result = self.vm.qmp('block-commit', device='drive0', top_node='mid', base_node='base', base='%s' % backing_img)
69
+ self.assert_qmp(result, 'error/class', 'GenericError')
70
+ self.assert_qmp(result, 'error/desc', "'base-node' and 'base' are mutually exclusive")
71
+
72
def test_top_is_active(self):
73
self.run_commit_test(test_img, backing_img, need_ready=True)
74
self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed"))
75
@@ -XXX,XX +XXX,XX @@ class TestSingleDrive(ImageCommitTestCase):
76
self.assert_qmp(result, 'error/class', 'GenericError')
77
self.assert_qmp(result, 'error/desc', 'Base \'%s\' not found' % mid_img)
78
79
+ def test_top_and_base_node_reversed(self):
80
+ self.assert_no_active_block_jobs()
81
+ result = self.vm.qmp('block-commit', device='drive0', top_node='base', base_node='top')
82
+ self.assert_qmp(result, 'error/class', 'GenericError')
83
+ self.assert_qmp(result, 'error/desc', "'top' is not in this backing file chain")
84
+
85
+ def test_top_node_in_wrong_chain(self):
86
+ self.assert_no_active_block_jobs()
87
+
88
+ result = self.vm.qmp('blockdev-add', driver='null-co', node_name='null')
89
+ self.assert_qmp(result, 'return', {})
90
+
91
+ result = self.vm.qmp('block-commit', device='drive0', top_node='null', base_node='base')
92
+ self.assert_qmp(result, 'error/class', 'GenericError')
93
+ self.assert_qmp(result, 'error/desc', "'null' is not in this backing file chain")
94
+
95
# When the job is running on a BB that is automatically deleted on hot
96
# unplug, the job is cancelled when the device disappears
97
def test_hot_unplug(self):
98
diff --git a/tests/qemu-iotests/040.out b/tests/qemu-iotests/040.out
99
index XXXXXXX..XXXXXXX 100644
100
--- a/tests/qemu-iotests/040.out
101
+++ b/tests/qemu-iotests/040.out
102
@@ -XXX,XX +XXX,XX @@
103
-.............................
104
+...........................................
105
----------------------------------------------------------------------
106
-Ran 29 tests
107
+Ran 43 tests
108
109
OK
110
--
111
2.17.1
112
113
Deleted patch
1
From: Sergio Lopez <slp@redhat.com>
2
1
3
In qemu_laio_process_completions_and_submit, the AioContext is acquired
4
before the ioq_submit iteration and after qemu_laio_process_completions,
5
but the latter is not thread safe either.
6
7
This change avoids a number of random crashes when the Main Thread and
8
an IO Thread collide processing completions for the same AioContext.
9
This is an example of such crash:
10
11
- The IO Thread is trying to acquire the AioContext at aio_co_enter,
12
which evidences that it didn't lock it before:
13
14
Thread 3 (Thread 0x7fdfd8bd8700 (LWP 36743)):
15
#0 0x00007fdfe0dd542d in __lll_lock_wait () at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:135
16
#1 0x00007fdfe0dd0de6 in _L_lock_870 () at /lib64/libpthread.so.0
17
#2 0x00007fdfe0dd0cdf in __GI___pthread_mutex_lock (mutex=mutex@entry=0x5631fde0e6c0)
18
at ../nptl/pthread_mutex_lock.c:114
19
#3 0x00005631fc0603a7 in qemu_mutex_lock_impl (mutex=0x5631fde0e6c0, file=0x5631fc23520f "util/async.c", line=511) at util/qemu-thread-posix.c:66
20
#4 0x00005631fc05b558 in aio_co_enter (ctx=0x5631fde0e660, co=0x7fdfcc0c2b40) at util/async.c:493
21
#5 0x00005631fc05b5ac in aio_co_wake (co=<optimized out>) at util/async.c:478
22
#6 0x00005631fbfc51ad in qemu_laio_process_completion (laiocb=<optimized out>) at block/linux-aio.c:104
23
#7 0x00005631fbfc523c in qemu_laio_process_completions (s=s@entry=0x7fdfc0297670)
24
at block/linux-aio.c:222
25
#8 0x00005631fbfc5499 in qemu_laio_process_completions_and_submit (s=0x7fdfc0297670)
26
at block/linux-aio.c:237
27
#9 0x00005631fc05d978 in aio_dispatch_handlers (ctx=ctx@entry=0x5631fde0e660) at util/aio-posix.c:406
28
#10 0x00005631fc05e3ea in aio_poll (ctx=0x5631fde0e660, blocking=blocking@entry=true)
29
at util/aio-posix.c:693
30
#11 0x00005631fbd7ad96 in iothread_run (opaque=0x5631fde0e1c0) at iothread.c:64
31
#12 0x00007fdfe0dcee25 in start_thread (arg=0x7fdfd8bd8700) at pthread_create.c:308
32
#13 0x00007fdfe0afc34d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:113
33
34
- The Main Thread is also processing completions from the same
35
AioContext, and crashes due to failed assertion at util/iov.c:78:
36
37
Thread 1 (Thread 0x7fdfeb5eac80 (LWP 36740)):
38
#0 0x00007fdfe0a391f7 in __GI_raise (sig=sig@entry=6) at ../nptl/sysdeps/unix/sysv/linux/raise.c:56
39
#1 0x00007fdfe0a3a8e8 in __GI_abort () at abort.c:90
40
#2 0x00007fdfe0a32266 in __assert_fail_base (fmt=0x7fdfe0b84e68 "%s%s%s:%u: %s%sAssertion `%s' failed.\n%n", assertion=assertion@entry=0x5631fc238ccb "offset == 0", file=file@entry=0x5631fc23698e "util/iov.c", line=line@entry=78, function=function@entry=0x5631fc236adc <__PRETTY_FUNCTION__.15220> "iov_memset")
41
at assert.c:92
42
#3 0x00007fdfe0a32312 in __GI___assert_fail (assertion=assertion@entry=0x5631fc238ccb "offset == 0", file=file@entry=0x5631fc23698e "util/iov.c", line=line@entry=78, function=function@entry=0x5631fc236adc <__PRETTY_FUNCTION__.15220> "iov_memset") at assert.c:101
43
#4 0x00005631fc065287 in iov_memset (iov=<optimized out>, iov_cnt=<optimized out>, offset=<optimized out>, offset@entry=65536, fillc=fillc@entry=0, bytes=15515191315812405248) at util/iov.c:78
44
#5 0x00005631fc065a63 in qemu_iovec_memset (qiov=<optimized out>, offset=offset@entry=65536, fillc=fillc@entry=0, bytes=<optimized out>) at util/iov.c:410
45
#6 0x00005631fbfc5178 in qemu_laio_process_completion (laiocb=0x7fdd920df630) at block/linux-aio.c:88
46
#7 0x00005631fbfc523c in qemu_laio_process_completions (s=s@entry=0x7fdfc0297670)
47
at block/linux-aio.c:222
48
#8 0x00005631fbfc5499 in qemu_laio_process_completions_and_submit (s=0x7fdfc0297670)
49
at block/linux-aio.c:237
50
#9 0x00005631fbfc54ed in qemu_laio_poll_cb (opaque=<optimized out>) at block/linux-aio.c:272
51
#10 0x00005631fc05d85e in run_poll_handlers_once (ctx=ctx@entry=0x5631fde0e660) at util/aio-posix.c:497
52
#11 0x00005631fc05e2ca in aio_poll (blocking=false, ctx=0x5631fde0e660) at util/aio-posix.c:574
53
#12 0x00005631fc05e2ca in aio_poll (ctx=0x5631fde0e660, blocking=blocking@entry=false)
54
at util/aio-posix.c:604
55
#13 0x00005631fbfcb8a3 in bdrv_do_drained_begin (ignore_parent=<optimized out>, recursive=<optimized out>, bs=<optimized out>) at block/io.c:273
56
#14 0x00005631fbfcb8a3 in bdrv_do_drained_begin (bs=0x5631fe8b6200, recursive=<optimized out>, parent=0x0, ignore_bds_parents=<optimized out>, poll=<optimized out>) at block/io.c:390
57
#15 0x00005631fbfbcd2e in blk_drain (blk=0x5631fe83ac80) at block/block-backend.c:1590
58
#16 0x00005631fbfbe138 in blk_remove_bs (blk=blk@entry=0x5631fe83ac80) at block/block-backend.c:774
59
#17 0x00005631fbfbe3d6 in blk_unref (blk=0x5631fe83ac80) at block/block-backend.c:401
60
#18 0x00005631fbfbe3d6 in blk_unref (blk=0x5631fe83ac80) at block/block-backend.c:449
61
#19 0x00005631fbfc9a69 in commit_complete (job=0x5631fe8b94b0, opaque=0x7fdfcc1bb080)
62
at block/commit.c:92
63
#20 0x00005631fbf7d662 in job_defer_to_main_loop_bh (opaque=0x7fdfcc1b4560) at job.c:973
64
#21 0x00005631fc05ad41 in aio_bh_poll (bh=0x7fdfcc01ad90) at util/async.c:90
65
#22 0x00005631fc05ad41 in aio_bh_poll (ctx=ctx@entry=0x5631fddffdb0) at util/async.c:118
66
#23 0x00005631fc05e210 in aio_dispatch (ctx=0x5631fddffdb0) at util/aio-posix.c:436
67
#24 0x00005631fc05ac1e in aio_ctx_dispatch (source=<optimized out>, callback=<optimized out>, user_data=<optimized out>) at util/async.c:261
68
#25 0x00007fdfeaae44c9 in g_main_context_dispatch (context=0x5631fde00140) at gmain.c:3201
69
#26 0x00007fdfeaae44c9 in g_main_context_dispatch (context=context@entry=0x5631fde00140) at gmain.c:3854
70
#27 0x00005631fc05d503 in main_loop_wait () at util/main-loop.c:215
71
#28 0x00005631fc05d503 in main_loop_wait (timeout=<optimized out>) at util/main-loop.c:238
72
#29 0x00005631fc05d503 in main_loop_wait (nonblocking=nonblocking@entry=0) at util/main-loop.c:497
73
#30 0x00005631fbd81412 in main_loop () at vl.c:1866
74
#31 0x00005631fbc18ff3 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>)
75
at vl.c:4647
76
77
- A closer examination shows that s->io_q.in_flight appears to have
78
gone backwards:
79
80
(gdb) frame 7
81
#7 0x00005631fbfc523c in qemu_laio_process_completions (s=s@entry=0x7fdfc0297670)
82
at block/linux-aio.c:222
83
222     qemu_laio_process_completion(laiocb);
84
(gdb) p s
85
$2 = (LinuxAioState *) 0x7fdfc0297670
86
(gdb) p *s
87
$3 = {aio_context = 0x5631fde0e660, ctx = 0x7fdfeb43b000, e = {rfd = 33, wfd = 33}, io_q = {plugged = 0,
88
in_queue = 0, in_flight = 4294967280, blocked = false, pending = {sqh_first = 0x0,
89
sqh_last = 0x7fdfc0297698}}, completion_bh = 0x7fdfc0280ef0, event_idx = 21, event_max = 241}
90
(gdb) p/x s->io_q.in_flight
91
$4 = 0xfffffff0
92
93
Signed-off-by: Sergio Lopez <slp@redhat.com>
94
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
95
---
96
block/linux-aio.c | 2 +-
97
1 file changed, 1 insertion(+), 1 deletion(-)
98
99
diff --git a/block/linux-aio.c b/block/linux-aio.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/block/linux-aio.c
102
+++ b/block/linux-aio.c
103
@@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completions(LinuxAioState *s)
104
105
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
106
{
107
+ aio_context_acquire(s->aio_context);
108
qemu_laio_process_completions(s);
109
110
- aio_context_acquire(s->aio_context);
111
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
112
ioq_submit(s);
113
}
114
--
115
2.17.1
116
117
Deleted patch
1
From: Alberto Garcia <berto@igalia.com>
2
1
3
We just fixed a bug that was causing a use-after-free when QEMU was
4
unable to create a temporary snapshot. This is a test case for this
5
scenario.
6
7
Signed-off-by: Alberto Garcia <berto@igalia.com>
8
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
---
10
tests/qemu-iotests/051 | 3 +++
11
tests/qemu-iotests/051.out | 3 +++
12
tests/qemu-iotests/051.pc.out | 3 +++
13
3 files changed, 9 insertions(+)
14
15
diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051
16
index XXXXXXX..XXXXXXX 100755
17
--- a/tests/qemu-iotests/051
18
+++ b/tests/qemu-iotests/051
19
@@ -XXX,XX +XXX,XX @@ printf %b "qemu-io $device_id \"write -P 0x33 0 4k\"\ncommit $device_id\n" |
20
21
$QEMU_IO -c "read -P 0x33 0 4k" "$TEST_IMG" | _filter_qemu_io
22
23
+# Using snapshot=on with a non-existent TMPDIR
24
+TMPDIR=/nonexistent run_qemu -drive driver=null-co,snapshot=on
25
+
26
# success, all done
27
echo "*** done"
28
rm -f $seq.full
29
diff --git a/tests/qemu-iotests/051.out b/tests/qemu-iotests/051.out
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tests/qemu-iotests/051.out
32
+++ b/tests/qemu-iotests/051.out
33
@@ -XXX,XX +XXX,XX @@ wrote 4096/4096 bytes at offset 0
34
35
read 4096/4096 bytes at offset 0
36
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
37
+Testing: -drive driver=null-co,snapshot=on
38
+QEMU_PROG: -drive driver=null-co,snapshot=on: Could not get temporary filename: No such file or directory
39
+
40
*** done
41
diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tests/qemu-iotests/051.pc.out
44
+++ b/tests/qemu-iotests/051.pc.out
45
@@ -XXX,XX +XXX,XX @@ wrote 4096/4096 bytes at offset 0
46
47
read 4096/4096 bytes at offset 0
48
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
49
+Testing: -drive driver=null-co,snapshot=on
50
+QEMU_PROG: -drive driver=null-co,snapshot=on: Could not get temporary filename: No such file or directory
51
+
52
*** done
53
--
54
2.17.1
55
56
Deleted patch
1
From: Sergio Lopez <slp@redhat.com>
2
1
3
AIO Coroutines shouldn't be managed by an AioContext different from the
4
one assigned when they are created. aio_co_enter avoids entering a
5
coroutine from a different AioContext, calling aio_co_schedule instead.
6
7
Scheduled coroutines are then entered by co_schedule_bh_cb using
8
qemu_coroutine_enter, which just calls qemu_aio_coroutine_enter with the
9
current AioContext obtained with qemu_get_current_aio_context.
10
Eventually, co->ctx will be set to the AioContext passed as an argument
11
to qemu_aio_coroutine_enter.
12
13
This means that, if an IO Thread's AioContext is being processed by the
14
Main Thread (due to aio_poll being called with a BDS AioContext, as it
15
happens in AIO_WAIT_WHILE among other places), the AioContext from some
16
coroutines may be wrongly replaced with the one from the Main Thread.
17
18
This is the root cause behind some crashes, mainly triggered by the
19
drain code at block/io.c. The most common are these abort and failed
20
assertion:
21
22
util/async.c:aio_co_schedule
23
456 if (scheduled) {
24
457 fprintf(stderr,
25
458 "%s: Co-routine was already scheduled in '%s'\n",
26
459 __func__, scheduled);
27
460 abort();
28
461 }
29
30
util/qemu-coroutine-lock.c:
31
286 assert(mutex->holder == self);
32
33
But it's also known to cause random errors at different locations, and
34
even SIGSEGV with broken coroutine backtraces.
35
36
By using qemu_aio_coroutine_enter directly in co_schedule_bh_cb, we can
37
pass the correct AioContext as an argument, making sure co->ctx is not
38
wrongly altered.
39
40
Signed-off-by: Sergio Lopez <slp@redhat.com>
41
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
42
---
43
util/async.c | 2 +-
44
1 file changed, 1 insertion(+), 1 deletion(-)
45
46
diff --git a/util/async.c b/util/async.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/util/async.c
49
+++ b/util/async.c
50
@@ -XXX,XX +XXX,XX @@ static void co_schedule_bh_cb(void *opaque)
51
52
/* Protected by write barrier in qemu_aio_coroutine_enter */
53
atomic_set(&co->scheduled, NULL);
54
- qemu_coroutine_enter(co);
55
+ qemu_aio_coroutine_enter(ctx, co);
56
aio_context_release(ctx);
57
}
58
}
59
--
60
2.17.1
61
62
Deleted patch
1
From: Fam Zheng <famz@redhat.com>
2
1
3
All callers have acquired ctx already. Doing that again results in
4
aio_poll() hang. This fixes the problem that a BDRV_POLL_WHILE() in the
5
callback cannot make progress because ctx is recursively locked, for
6
example, when drive-backup finishes.
7
8
There are two callers of job_finalize():
9
10
fam@lemon:~/work/qemu [master]$ git grep -w -A1 '^\s*job_finalize'
11
blockdev.c: job_finalize(&job->job, errp);
12
blockdev.c- aio_context_release(aio_context);
13
--
14
job-qmp.c: job_finalize(job, errp);
15
job-qmp.c- aio_context_release(aio_context);
16
--
17
tests/test-blockjob.c: job_finalize(&job->job, &error_abort);
18
tests/test-blockjob.c- assert(job->job.status == JOB_STATUS_CONCLUDED);
19
20
Ignoring the test, it's easy to see both callers to job_finalize (and
21
job_do_finalize) have acquired the context.
22
23
Cc: qemu-stable@nongnu.org
24
Reported-by: Gu Nini <ngu@redhat.com>
25
Reviewed-by: Eric Blake <eblake@redhat.com>
26
Signed-off-by: Fam Zheng <famz@redhat.com>
27
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
28
---
29
job.c | 18 +++++-------------
30
1 file changed, 5 insertions(+), 13 deletions(-)
31
32
diff --git a/job.c b/job.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/job.c
35
+++ b/job.c
36
@@ -XXX,XX +XXX,XX @@ static void job_txn_del_job(Job *job)
37
}
38
}
39
40
-static int job_txn_apply(JobTxn *txn, int fn(Job *), bool lock)
41
+static int job_txn_apply(JobTxn *txn, int fn(Job *))
42
{
43
- AioContext *ctx;
44
Job *job, *next;
45
int rc = 0;
46
47
QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
48
- if (lock) {
49
- ctx = job->aio_context;
50
- aio_context_acquire(ctx);
51
- }
52
rc = fn(job);
53
- if (lock) {
54
- aio_context_release(ctx);
55
- }
56
if (rc) {
57
break;
58
}
59
@@ -XXX,XX +XXX,XX @@ static void job_do_finalize(Job *job)
60
assert(job && job->txn);
61
62
/* prepare the transaction to complete */
63
- rc = job_txn_apply(job->txn, job_prepare, true);
64
+ rc = job_txn_apply(job->txn, job_prepare);
65
if (rc) {
66
job_completed_txn_abort(job);
67
} else {
68
- job_txn_apply(job->txn, job_finalize_single, true);
69
+ job_txn_apply(job->txn, job_finalize_single);
70
}
71
}
72
73
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_success(Job *job)
74
assert(other_job->ret == 0);
75
}
76
77
- job_txn_apply(txn, job_transition_to_pending, false);
78
+ job_txn_apply(txn, job_transition_to_pending);
79
80
/* If no jobs need manual finalization, automatically do so */
81
- if (job_txn_apply(txn, job_needs_finalize, false) == 0) {
82
+ if (job_txn_apply(txn, job_needs_finalize) == 0) {
83
job_do_finalize(job);
84
}
85
}
86
--
87
2.17.1
88
89
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
job_completed() had a problem with double locking that was recently
4
fixed independently by two different commits:
5
6
"job: Fix nested aio_poll() hanging in job_txn_apply"
7
"jobs: add exit shim"
8
9
One fix removed the first aio_context_acquire(), the other fix removed
10
the other one. Now we have a bug again and the code is run without any
11
locking.
12
13
Add it back in one of the places.
14
15
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
16
Reviewed-by: Max Reitz <mreitz@redhat.com>
17
Reviewed-by: John Snow <jsnow@redhat.com>
18
---
19
job.c | 4 ++++
20
1 file changed, 4 insertions(+)
21
22
diff --git a/job.c b/job.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/job.c
25
+++ b/job.c
26
@@ -XXX,XX +XXX,XX @@ static void job_completed(Job *job)
27
static void job_exit(void *opaque)
28
{
29
Job *job = (Job *)opaque;
30
+ AioContext *ctx = job->aio_context;
31
+
32
+ aio_context_acquire(ctx);
33
job_completed(job);
34
+ aio_context_release(ctx);
35
}
36
37
/**
38
--
39
2.17.1
40
41
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
In the context of draining a BDS, the .drained_poll callback of block
4
jobs is called. If this returns true (i.e. there is still some activity
5
pending), the drain operation may call aio_poll() with blocking=true to
6
wait for completion.
7
8
As soon as the pending activity is completed and the job finally arrives
9
in a quiescent state (i.e. its coroutine either yields with busy=false
10
or terminates), the block job must notify the aio_poll() loop to wake
11
up, otherwise we get a deadlock if both are running in different
12
threads.
13
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
Reviewed-by: Fam Zheng <famz@redhat.com>
16
Reviewed-by: Max Reitz <mreitz@redhat.com>
17
---
18
include/block/blockjob.h | 13 +++++++++++++
19
include/qemu/job.h | 3 +++
20
blockjob.c | 18 ++++++++++++++++++
21
job.c | 7 +++++++
22
4 files changed, 41 insertions(+)
23
24
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/block/blockjob.h
27
+++ b/include/block/blockjob.h
28
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
29
/** Called when the job transitions to READY */
30
Notifier ready_notifier;
31
32
+ /** Called when the job coroutine yields or terminates */
33
+ Notifier idle_notifier;
34
+
35
/** BlockDriverStates that are involved in this block job */
36
GSList *nodes;
37
} BlockJob;
38
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
39
*/
40
void block_job_remove_all_bdrv(BlockJob *job);
41
42
+/**
43
+ * block_job_wakeup_all_bdrv:
44
+ * @job: The block job
45
+ *
46
+ * Calls bdrv_wakeup() for all BlockDriverStates that have been added to the
47
+ * job. This function is to be called whenever child_job_drained_poll() would
48
+ * go from true to false to notify waiting drain requests.
49
+ */
50
+void block_job_wakeup_all_bdrv(BlockJob *job);
51
+
52
/**
53
* block_job_set_speed:
54
* @job: The job to set the speed for.
55
diff --git a/include/qemu/job.h b/include/qemu/job.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/include/qemu/job.h
58
+++ b/include/qemu/job.h
59
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
60
/** Notifiers called when the job transitions to READY */
61
NotifierList on_ready;
62
63
+ /** Notifiers called when the job coroutine yields or terminates */
64
+ NotifierList on_idle;
65
+
66
/** Element of the list of jobs */
67
QLIST_ENTRY(Job) job_list;
68
69
diff --git a/blockjob.c b/blockjob.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/blockjob.c
72
+++ b/blockjob.c
73
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
74
return 0;
75
}
76
77
+void block_job_wakeup_all_bdrv(BlockJob *job)
78
+{
79
+ GSList *l;
80
+
81
+ for (l = job->nodes; l; l = l->next) {
82
+ BdrvChild *c = l->data;
83
+ bdrv_wakeup(c->bs);
84
+ }
85
+}
86
+
87
+static void block_job_on_idle(Notifier *n, void *opaque)
88
+{
89
+ BlockJob *job = opaque;
90
+ block_job_wakeup_all_bdrv(job);
91
+}
92
+
93
bool block_job_is_internal(BlockJob *job)
94
{
95
return (job->job.id == NULL);
96
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
97
job->finalize_completed_notifier.notify = block_job_event_completed;
98
job->pending_notifier.notify = block_job_event_pending;
99
job->ready_notifier.notify = block_job_event_ready;
100
+ job->idle_notifier.notify = block_job_on_idle;
101
102
notifier_list_add(&job->job.on_finalize_cancelled,
103
&job->finalize_cancelled_notifier);
104
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
105
&job->finalize_completed_notifier);
106
notifier_list_add(&job->job.on_pending, &job->pending_notifier);
107
notifier_list_add(&job->job.on_ready, &job->ready_notifier);
108
+ notifier_list_add(&job->job.on_idle, &job->idle_notifier);
109
110
error_setg(&job->blocker, "block device is in use by block job: %s",
111
job_type_str(&job->job));
112
diff --git a/job.c b/job.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/job.c
115
+++ b/job.c
116
@@ -XXX,XX +XXX,XX @@ static void job_event_ready(Job *job)
117
notifier_list_notify(&job->on_ready, job);
118
}
119
120
+static void job_event_idle(Job *job)
121
+{
122
+ notifier_list_notify(&job->on_idle, job);
123
+}
124
+
125
void job_enter_cond(Job *job, bool(*fn)(Job *job))
126
{
127
if (!job_started(job)) {
128
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
129
timer_mod(&job->sleep_timer, ns);
130
}
131
job->busy = false;
132
+ job_event_idle(job);
133
job_unlock();
134
qemu_coroutine_yield();
135
136
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
137
assert(job && job->driver && job->driver->run);
138
job_pause_point(job);
139
job->ret = job->driver->run(job, &job->err);
140
+ job_event_idle(job);
141
job->deferred_to_main_loop = true;
142
aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
143
}
144
--
145
2.17.1
146
147
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
Even if AIO_WAIT_WHILE() is called in the home context of the
4
AioContext, we still want to allow the condition to change depending on
5
other threads as long as they kick the AioWait. Specfically block jobs
6
can be running in an I/O thread and should then be able to kick a drain
7
in the main loop context.
8
9
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
10
Reviewed-by: Fam Zheng <famz@redhat.com>
11
---
12
include/block/aio-wait.h | 6 +++---
13
1 file changed, 3 insertions(+), 3 deletions(-)
14
15
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/block/aio-wait.h
18
+++ b/include/block/aio-wait.h
19
@@ -XXX,XX +XXX,XX @@ typedef struct {
20
bool waited_ = false; \
21
AioWait *wait_ = (wait); \
22
AioContext *ctx_ = (ctx); \
23
+ /* Increment wait_->num_waiters before evaluating cond. */ \
24
+ atomic_inc(&wait_->num_waiters); \
25
if (ctx_ && in_aio_context_home_thread(ctx_)) { \
26
while ((cond)) { \
27
aio_poll(ctx_, true); \
28
@@ -XXX,XX +XXX,XX @@ typedef struct {
29
} else { \
30
assert(qemu_get_current_aio_context() == \
31
qemu_get_aio_context()); \
32
- /* Increment wait_->num_waiters before evaluating cond. */ \
33
- atomic_inc(&wait_->num_waiters); \
34
while ((cond)) { \
35
if (ctx_) { \
36
aio_context_release(ctx_); \
37
@@ -XXX,XX +XXX,XX @@ typedef struct {
38
} \
39
waited_ = true; \
40
} \
41
- atomic_dec(&wait_->num_waiters); \
42
} \
43
+ atomic_dec(&wait_->num_waiters); \
44
waited_; })
45
46
/**
47
--
48
2.17.1
49
50
1
From: Kevin Wolf <kwolf@redhat.com>
1
The VM.add_object() method can be used to add IOThreads or memory
2
backend objects.
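
A rough usage sketch (the object options shown are illustrative): an iotest
can attach an IOThread object before launching the VM:

    # Appends '-object iothread,id=iothread0' to the VM's command line so
    # that block nodes can later be moved onto that IOThread.
    vm.add_object('iothread,id=iothread0')
    vm.launch()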
2
3
3
Request callbacks can do pretty much anything, including operations that
4
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
will yield from the coroutine (such as draining the backend). In that
5
Reviewed-by: Eric Blake <eblake@redhat.com>
5
case, a decreased in_flight would be visible to other code and could
6
Message-id: 20171207201320.19284-5-stefanha@redhat.com
6
lead to a drain completing while the callback hasn't actually completed
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
7
yet.
8
---
9
tests/qemu-iotests/iotests.py | 5 +++++
10
1 file changed, 5 insertions(+)
8
11
9
Note that reordering these operations forbids calling drain directly
12
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
10
inside an AIO callback. As Paolo explains, indirectly calling it is
11
okay:
12
13
- Calling it through a coroutine is okay, because then
14
bdrv_drained_begin() goes through bdrv_co_yield_to_drain() and you
15
have in_flight=2 when bdrv_co_yield_to_drain() yields, then soon
16
in_flight=1 when the aio_co_wake() in the AIO callback completes, then
17
in_flight=0 after the bottom half starts.
18
19
- Calling it through a bottom half would be okay too, as long as the AIO
20
callback remembers to do inc_in_flight/dec_in_flight just like
21
bdrv_co_yield_to_drain() and bdrv_co_drain_bh_cb() do
22
23
A few more important cases that come to mind:
24
25
- A coroutine that yields because of I/O is okay, with a sequence
26
similar to bdrv_co_yield_to_drain().
27
28
- A coroutine that yields with no I/O pending will correctly decrease
29
in_flight to zero before yielding.
30
31
- Calling more AIO from the callback won't overflow the counter just
32
because of mutual recursion, because AIO functions always yield at
33
least once before invoking the callback.
34
35
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
36
Reviewed-by: Fam Zheng <famz@redhat.com>
37
Reviewed-by: Max Reitz <mreitz@redhat.com>
38
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
39
---
40
block/block-backend.c | 2 +-
41
1 file changed, 1 insertion(+), 1 deletion(-)
42
43
diff --git a/block/block-backend.c b/block/block-backend.c
44
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
45
--- a/block/block-backend.c
14
--- a/tests/qemu-iotests/iotests.py
46
+++ b/block/block-backend.c
15
+++ b/tests/qemu-iotests/iotests.py
47
@@ -XXX,XX +XXX,XX @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
16
@@ -XXX,XX +XXX,XX @@ class VM(qtest.QEMUQtestMachine):
48
static void blk_aio_complete(BlkAioEmAIOCB *acb)
17
socket_scm_helper=socket_scm_helper)
49
{
18
self._num_drives = 0
50
if (acb->has_returned) {
19
51
- blk_dec_in_flight(acb->rwco.blk);
20
+ def add_object(self, opts):
52
acb->common.cb(acb->common.opaque, acb->rwco.ret);
21
+ self._args.append('-object')
53
+ blk_dec_in_flight(acb->rwco.blk);
22
+ self._args.append(opts)
54
qemu_aio_unref(acb);
23
+ return self
55
}
24
+
56
}
25
def add_device(self, opts):
26
self._args.append('-device')
27
self._args.append(opts)
57
--
28
--
58
2.17.1
29
2.14.3
59
30
60
31
1
From: Kevin Wolf <kwolf@redhat.com>
1
There is a small chance that iothread_stop() hangs as follows:
2
2
3
This extends the existing drain test with a block job to include
3
Thread 3 (Thread 0x7f63eba5f700 (LWP 16105)):
4
variants where the block job runs in a different AioContext.
4
#0 0x00007f64012c09b6 in ppoll () at /lib64/libc.so.6
5
#1 0x000055959992eac9 in ppoll (__ss=0x0, __timeout=0x0, __nfds=<optimized out>, __fds=<optimized out>) at /usr/include/bits/poll2.h:77
6
#2 0x000055959992eac9 in qemu_poll_ns (fds=<optimized out>, nfds=<optimized out>, timeout=<optimized out>) at util/qemu-timer.c:322
7
#3 0x0000559599930711 in aio_poll (ctx=0x55959bdb83c0, blocking=blocking@entry=true) at util/aio-posix.c:629
8
#4 0x00005595996806fe in iothread_run (opaque=0x55959bd78400) at iothread.c:59
9
#5 0x00007f640159f609 in start_thread () at /lib64/libpthread.so.0
10
#6 0x00007f64012cce6f in clone () at /lib64/libc.so.6
5
11
6
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
12
Thread 1 (Thread 0x7f640b45b280 (LWP 16103)):
7
Reviewed-by: Fam Zheng <famz@redhat.com>
13
#0 0x00007f64015a0b6d in pthread_join () at /lib64/libpthread.so.0
14
#1 0x00005595999332ef in qemu_thread_join (thread=<optimized out>) at util/qemu-thread-posix.c:547
15
#2 0x00005595996808ae in iothread_stop (iothread=<optimized out>) at iothread.c:91
16
#3 0x000055959968094d in iothread_stop_iter (object=<optimized out>, opaque=<optimized out>) at iothread.c:102
17
#4 0x0000559599857d97 in do_object_child_foreach (obj=obj@entry=0x55959bdb8100, fn=fn@entry=0x559599680930 <iothread_stop_iter>, opaque=opaque@entry=0x0, recurse=recurse@entry=false) at qom/object.c:852
18
#5 0x0000559599859477 in object_child_foreach (obj=obj@entry=0x55959bdb8100, fn=fn@entry=0x559599680930 <iothread_stop_iter>, opaque=opaque@entry=0x0) at qom/object.c:867
19
#6 0x0000559599680a6e in iothread_stop_all () at iothread.c:341
20
#7 0x000055959955b1d5 in main (argc=<optimized out>, argv=<optimized out>, envp=<optimized out>) at vl.c:4913
21
22
The relevant code from iothread_run() is:
23
24
while (!atomic_read(&iothread->stopping)) {
25
aio_poll(iothread->ctx, true);
26
27
and iothread_stop():
28
29
iothread->stopping = true;
30
aio_notify(iothread->ctx);
31
...
32
qemu_thread_join(&iothread->thread);
33
34
The following scenario can occur:
35
36
1. IOThread:
37
while (!atomic_read(&iothread->stopping)) -> stopping=false
38
39
2. Main loop:
40
iothread->stopping = true;
41
aio_notify(iothread->ctx);
42
43
3. IOThread:
44
aio_poll(iothread->ctx, true); -> hang
45
46
The bug is explained by the AioContext->notify_me doc comments:
47
48
"If this field is 0, everything (file descriptors, bottom halves,
49
timers) will be re-evaluated before the next blocking poll(), thus the
50
event_notifier_set call can be skipped."
51
52
The problem is that "everything" does not include checking
53
iothread->stopping. This means iothread_run() will block in aio_poll()
54
if aio_notify() was called just before aio_poll().
55
56
This patch fixes the hang by replacing aio_notify() with
57
aio_bh_schedule_oneshot(). This makes aio_poll() or g_main_loop_run()
58
return.
59
60
Implementing this properly required a new bool running flag. The new
61
flag prevents races that are tricky if we try to use iothread->stopping.
62
Now iothread->stopping is purely for iothread_stop() and
63
iothread->running is purely for the iothread_run() thread.
64
65
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
66
Reviewed-by: Eric Blake <eblake@redhat.com>
67
Message-id: 20171207201320.19284-6-stefanha@redhat.com
68
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
---
69
---
9
tests/test-bdrv-drain.c | 92 ++++++++++++++++++++++++++++++++++++++---
70
include/sysemu/iothread.h | 3 ++-
10
1 file changed, 86 insertions(+), 6 deletions(-)
71
iothread.c | 20 +++++++++++++++-----
72
2 files changed, 17 insertions(+), 6 deletions(-)
11
73
12
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
74
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
13
index XXXXXXX..XXXXXXX 100644
75
index XXXXXXX..XXXXXXX 100644
14
--- a/tests/test-bdrv-drain.c
76
--- a/include/sysemu/iothread.h
15
+++ b/tests/test-bdrv-drain.c
77
+++ b/include/sysemu/iothread.h
16
@@ -XXX,XX +XXX,XX @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
78
@@ -XXX,XX +XXX,XX @@ typedef struct {
17
}
79
GOnce once;
80
QemuMutex init_done_lock;
81
QemuCond init_done_cond; /* is thread initialization done? */
82
- bool stopping;
83
+ bool stopping; /* has iothread_stop() been called? */
84
+ bool running; /* should iothread_run() continue? */
85
int thread_id;
86
87
/* AioContext poll parameters */
88
diff --git a/iothread.c b/iothread.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/iothread.c
91
+++ b/iothread.c
92
@@ -XXX,XX +XXX,XX @@ static void *iothread_run(void *opaque)
93
qemu_cond_signal(&iothread->init_done_cond);
94
qemu_mutex_unlock(&iothread->init_done_lock);
95
96
- while (!atomic_read(&iothread->stopping)) {
97
+ while (iothread->running) {
98
aio_poll(iothread->ctx, true);
99
100
if (atomic_read(&iothread->worker_context)) {
101
@@ -XXX,XX +XXX,XX @@ static void *iothread_run(void *opaque)
102
return NULL;
18
}
103
}
19
104
20
+static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
105
+/* Runs in iothread_run() thread */
106
+static void iothread_stop_bh(void *opaque)
21
+{
107
+{
22
+ if (drain_type != BDRV_DRAIN_ALL) {
108
+ IOThread *iothread = opaque;
23
+ aio_context_acquire(bdrv_get_aio_context(bs));
109
+
24
+ }
110
+ iothread->running = false; /* stop iothread_run() */
25
+ do_drain_begin(drain_type, bs);
111
+
26
+ if (drain_type != BDRV_DRAIN_ALL) {
112
+ if (iothread->main_loop) {
27
+ aio_context_release(bdrv_get_aio_context(bs));
113
+ g_main_loop_quit(iothread->main_loop);
28
+ }
114
+ }
29
+}
115
+}
30
+
116
+
31
+static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
117
void iothread_stop(IOThread *iothread)
32
+{
33
+ if (drain_type != BDRV_DRAIN_ALL) {
34
+ aio_context_acquire(bdrv_get_aio_context(bs));
35
+ }
36
+ do_drain_end(drain_type, bs);
37
+ if (drain_type != BDRV_DRAIN_ALL) {
38
+ aio_context_release(bdrv_get_aio_context(bs));
39
+ }
40
+}
41
+
42
static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
43
{
118
{
44
BlockBackend *blk;
119
if (!iothread->ctx || iothread->stopping) {
45
@@ -XXX,XX +XXX,XX @@ BlockJobDriver test_job_driver = {
120
return;
46
},
121
}
47
};
122
iothread->stopping = true;
48
123
- aio_notify(iothread->ctx);
49
-static void test_blockjob_common(enum drain_type drain_type)
124
- if (atomic_read(&iothread->main_loop)) {
50
+static void test_blockjob_common(enum drain_type drain_type, bool use_iothread)
125
- g_main_loop_quit(iothread->main_loop);
51
{
126
- }
52
BlockBackend *blk_src, *blk_target;
127
+ aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
53
BlockDriverState *src, *target;
128
qemu_thread_join(&iothread->thread);
54
BlockJob *job;
55
+ IOThread *iothread = NULL;
56
+ AioContext *ctx;
57
int ret;
58
59
src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
60
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type)
61
blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
62
blk_insert_bs(blk_src, src, &error_abort);
63
64
+ if (use_iothread) {
65
+ iothread = iothread_new();
66
+ ctx = iothread_get_aio_context(iothread);
67
+ blk_set_aio_context(blk_src, ctx);
68
+ } else {
69
+ ctx = qemu_get_aio_context();
70
+ }
71
+
72
target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
73
&error_abort);
74
blk_target = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
75
blk_insert_bs(blk_target, target, &error_abort);
76
77
+ aio_context_acquire(ctx);
78
job = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL,
79
0, 0, NULL, NULL, &error_abort);
80
block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
81
job_start(&job->job);
82
+ aio_context_release(ctx);
83
84
g_assert_cmpint(job->job.pause_count, ==, 0);
85
g_assert_false(job->job.paused);
86
g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
87
88
- do_drain_begin(drain_type, src);
89
+ do_drain_begin_unlocked(drain_type, src);
90
91
if (drain_type == BDRV_DRAIN_ALL) {
92
/* bdrv_drain_all() drains both src and target */
93
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type)
94
g_assert_true(job->job.paused);
95
g_assert_false(job->job.busy); /* The job is paused */
96
97
- do_drain_end(drain_type, src);
98
+ do_drain_end_unlocked(drain_type, src);
99
+
100
+ if (use_iothread) {
101
+ /* paused is reset in the I/O thread, wait for it */
102
+ while (job->job.paused) {
103
+ aio_poll(qemu_get_aio_context(), false);
104
+ }
105
+ }
106
107
g_assert_cmpint(job->job.pause_count, ==, 0);
108
g_assert_false(job->job.paused);
109
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common(enum drain_type drain_type)
110
111
do_drain_end(drain_type, target);
112
113
+ if (use_iothread) {
114
+ /* paused is reset in the I/O thread, wait for it */
115
+ while (job->job.paused) {
116
+ aio_poll(qemu_get_aio_context(), false);
117
+ }
118
+ }
119
+
120
g_assert_cmpint(job->job.pause_count, ==, 0);
121
g_assert_false(job->job.paused);
122
g_assert_true(job->job.busy); /* We're in job_sleep_ns() */
123
124
+ aio_context_acquire(ctx);
125
ret = job_complete_sync(&job->job, &error_abort);
126
g_assert_cmpint(ret, ==, 0);
127
128
+ if (use_iothread) {
129
+ blk_set_aio_context(blk_src, qemu_get_aio_context());
130
+ }
131
+ aio_context_release(ctx);
132
+
133
blk_unref(blk_src);
134
blk_unref(blk_target);
135
bdrv_unref(src);
136
bdrv_unref(target);
137
+
138
+ if (iothread) {
139
+ iothread_join(iothread);
140
+ }
141
}
129
}
142
130
143
static void test_blockjob_drain_all(void)
131
@@ -XXX,XX +XXX,XX @@ static void iothread_complete(UserCreatable *obj, Error **errp)
144
{
132
char *name, *thread_name;
145
- test_blockjob_common(BDRV_DRAIN_ALL);
133
146
+ test_blockjob_common(BDRV_DRAIN_ALL, false);
134
iothread->stopping = false;
147
}
135
+ iothread->running = true;
148
136
iothread->thread_id = -1;
149
static void test_blockjob_drain(void)
137
iothread->ctx = aio_context_new(&local_error);
150
{
138
if (!iothread->ctx) {
151
- test_blockjob_common(BDRV_DRAIN);
152
+ test_blockjob_common(BDRV_DRAIN, false);
153
}
154
155
static void test_blockjob_drain_subtree(void)
156
{
157
- test_blockjob_common(BDRV_SUBTREE_DRAIN);
158
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, false);
159
+}
160
+
161
+static void test_blockjob_iothread_drain_all(void)
162
+{
163
+ test_blockjob_common(BDRV_DRAIN_ALL, true);
164
+}
165
+
166
+static void test_blockjob_iothread_drain(void)
167
+{
168
+ test_blockjob_common(BDRV_DRAIN, true);
169
+}
170
+
171
+static void test_blockjob_iothread_drain_subtree(void)
172
+{
173
+ test_blockjob_common(BDRV_SUBTREE_DRAIN, true);
174
}
175
176
177
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
178
g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
179
test_blockjob_drain_subtree);
180
181
+ g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
182
+ test_blockjob_iothread_drain_all);
183
+ g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
184
+ test_blockjob_iothread_drain);
185
+ g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree",
186
+ test_blockjob_iothread_drain_subtree);
187
+
188
g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
189
g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
190
g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
191
--
139
--
192
2.17.1
140
2.14.3
193
141
194
142
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
All callers in QEMU proper hold the AioContext lock when calling
4
job_finish_sync(). test-blockjob should do the same when it calls the
5
function indirectly through job_cancel_sync().
6
7
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
8
Reviewed-by: Fam Zheng <famz@redhat.com>
9
---
10
include/qemu/job.h | 6 ++++++
11
tests/test-blockjob.c | 6 ++++++
12
2 files changed, 12 insertions(+)
13
14
diff --git a/include/qemu/job.h b/include/qemu/job.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/qemu/job.h
17
+++ b/include/qemu/job.h
18
@@ -XXX,XX +XXX,XX @@ void job_user_cancel(Job *job, bool force, Error **errp);
19
*
20
* Returns the return value from the job if the job actually completed
21
* during the call, or -ECANCELED if it was canceled.
22
+ *
23
+ * Callers must hold the AioContext lock of job->aio_context.
24
*/
25
int job_cancel_sync(Job *job);
26
27
@@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void);
28
* function).
29
*
30
* Returns the return value from the job.
31
+ *
32
+ * Callers must hold the AioContext lock of job->aio_context.
33
*/
34
int job_complete_sync(Job *job, Error **errp);
35
36
@@ -XXX,XX +XXX,XX @@ void job_dismiss(Job **job, Error **errp);
37
*
38
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
39
* cancelled before completing, and -errno in other error cases.
40
+ *
41
+ * Callers must hold the AioContext lock of job->aio_context.
42
*/
43
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
44
45
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/tests/test-blockjob.c
48
+++ b/tests/test-blockjob.c
49
@@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s)
50
BlockJob *job = &s->common;
51
BlockBackend *blk = s->blk;
52
JobStatus sts = job->job.status;
53
+ AioContext *ctx;
54
+
55
+ ctx = job->job.aio_context;
56
+ aio_context_acquire(ctx);
57
58
job_cancel_sync(&job->job);
59
if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
60
@@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s)
61
assert(job->job.status == JOB_STATUS_NULL);
62
job_unref(&job->job);
63
destroy_blk(blk);
64
+
65
+ aio_context_release(ctx);
66
}
67
68
static void test_cancel_created(void)
69
--
70
2.17.1
71
72
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
job_finish_sync() needs to release the AioContext lock of the job before
4
calling aio_poll(). Otherwise, callbacks called by aio_poll() would
5
possibly take the lock a second time and run into a deadlock with a
6
nested AIO_WAIT_WHILE() call.
7
8
Also, job_drain() without aio_poll() isn't necessarily enough to make
9
progress on a job; it could depend on bottom halves being executed.
10
11
Combine both open-coded while loops into a single AIO_WAIT_WHILE() call
12
that solves both of these problems.
13
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
Reviewed-by: Fam Zheng <famz@redhat.com>
16
Reviewed-by: Max Reitz <mreitz@redhat.com>
17
---
18
job.c | 14 ++++++--------
19
1 file changed, 6 insertions(+), 8 deletions(-)
20
21
diff --git a/job.c b/job.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/job.c
24
+++ b/job.c
25
@@ -XXX,XX +XXX,XX @@
26
#include "qemu/job.h"
27
#include "qemu/id.h"
28
#include "qemu/main-loop.h"
29
+#include "block/aio-wait.h"
30
#include "trace-root.h"
31
#include "qapi/qapi-events-job.h"
32
33
@@ -XXX,XX +XXX,XX @@ void job_complete(Job *job, Error **errp)
34
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
35
{
36
Error *local_err = NULL;
37
+ AioWait dummy_wait = {};
38
int ret;
39
40
job_ref(job);
41
@@ -XXX,XX +XXX,XX @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
42
job_unref(job);
43
return -EBUSY;
44
}
45
- /* job_drain calls job_enter, and it should be enough to induce progress
46
- * until the job completes or moves to the main thread. */
47
- while (!job->deferred_to_main_loop && !job_is_completed(job)) {
48
- job_drain(job);
49
- }
50
- while (!job_is_completed(job)) {
51
- aio_poll(qemu_get_aio_context(), true);
52
- }
53
+
54
+ AIO_WAIT_WHILE(&dummy_wait, job->aio_context,
55
+ (job_drain(job), !job_is_completed(job)));
56
+
57
ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
58
job_unref(job);
59
return ret;
60
--
61
2.17.1
62
63
Deleted patch
1
From: Kevin Wolf <kwolf@redhat.com>
2
1
3
A bdrv_drain operation must ensure that all parents are quiesced; this
4
includes BlockBackends. Otherwise, callbacks called by requests that are
5
completed on the BDS layer, but not quite yet on the BlockBackend layer
6
could still create new requests.
7
8
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
9
Reviewed-by: Fam Zheng <famz@redhat.com>
10
Reviewed-by: Max Reitz <mreitz@redhat.com>
11
---
12
block/block-backend.c | 9 +++++++++
13
1 file changed, 9 insertions(+)
14
15
diff --git a/block/block-backend.c b/block/block-backend.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/block/block-backend.c
18
+++ b/block/block-backend.c
19
@@ -XXX,XX +XXX,XX @@ static void blk_root_inherit_options(int *child_flags, QDict *child_options,
20
abort();
21
}
22
static void blk_root_drained_begin(BdrvChild *child);
23
+static bool blk_root_drained_poll(BdrvChild *child);
24
static void blk_root_drained_end(BdrvChild *child);
25
26
static void blk_root_change_media(BdrvChild *child, bool load);
27
@@ -XXX,XX +XXX,XX @@ static const BdrvChildRole child_root = {
28
.get_parent_desc = blk_root_get_parent_desc,
29
30
.drained_begin = blk_root_drained_begin,
31
+ .drained_poll = blk_root_drained_poll,
32
.drained_end = blk_root_drained_end,
33
34
.activate = blk_root_activate,
35
@@ -XXX,XX +XXX,XX @@ static void blk_root_drained_begin(BdrvChild *child)
36
}
37
}
38
39
+static bool blk_root_drained_poll(BdrvChild *child)
40
+{
41
+ BlockBackend *blk = child->opaque;
42
+ assert(blk->quiesce_counter);
43
+ return !!blk->in_flight;
44
+}
45
+
46
static void blk_root_drained_end(BdrvChild *child)
47
{
48
BlockBackend *blk = child->opaque;
49
--
50
2.17.1
51
52
1
From: Kevin Wolf <kwolf@redhat.com>
1
This test case will prevent future regressions with savevm and
2
IOThreads.
2
3
3
blk_unref() first decreases the refcount of the BlockBackend and calls
4
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
4
blk_delete() if the refcount reaches zero. Requests can still be in
5
Reviewed-by: Eric Blake <eblake@redhat.com>
5
flight at this point; they are only drained during blk_delete():
6
Message-id: 20171207201320.19284-7-stefanha@redhat.com
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
---
9
tests/qemu-iotests/203 | 59 ++++++++++++++++++++++++++++++++++++++++++++++
10
tests/qemu-iotests/203.out | 6 +++++
11
tests/qemu-iotests/group | 1 +
12
3 files changed, 66 insertions(+)
13
create mode 100755 tests/qemu-iotests/203
14
create mode 100644 tests/qemu-iotests/203.out
6
15
7
At this point, arbitrary callbacks can run. If any callback takes a
16
diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203
8
temporary BlockBackend reference, it will first increase the refcount to
17
new file mode 100755
9
1 and then decrease it to 0 again, triggering another blk_delete(). This
18
index XXXXXXX..XXXXXXX
10
will cause a use-after-free crash in the outer blk_delete().
19
--- /dev/null
11
20
+++ b/tests/qemu-iotests/203
12
Fix it by draining the BlockBackend before decreasing the refcount to 0.
21
@@ -XXX,XX +XXX,XX @@
13
Assert in blk_ref() that it never takes the first refcount (which would
22
+#!/usr/bin/env python
14
mean that the BlockBackend is already being deleted).
23
+#
15
24
+# Copyright (C) 2017 Red Hat, Inc.
16
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
25
+#
17
Reviewed-by: Fam Zheng <famz@redhat.com>
26
+# This program is free software; you can redistribute it and/or modify
18
Reviewed-by: Max Reitz <mreitz@redhat.com>
27
+# it under the terms of the GNU General Public License as published by
19
---
28
+# the Free Software Foundation; either version 2 of the License, or
20
block/block-backend.c | 9 ++++++++-
29
+# (at your option) any later version.
21
1 file changed, 8 insertions(+), 1 deletion(-)
30
+#
22
31
+# This program is distributed in the hope that it will be useful,
23
diff --git a/block/block-backend.c b/block/block-backend.c
32
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
33
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
34
+# GNU General Public License for more details.
35
+#
36
+# You should have received a copy of the GNU General Public License
37
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
38
+#
39
+# Creator/Owner: Stefan Hajnoczi <stefanha@redhat.com>
40
+#
41
+# Check that QMP 'migrate' with multiple drives on a single IOThread completes
42
+# successfully. This particular command triggered a hang in the source QEMU
43
+# process due to recursive AioContext locking in bdrv_invalidate_all() and
44
+# BDRV_POLL_WHILE().
45
+
46
+import iotests
47
+
48
+iotests.verify_image_format(supported_fmts=['qcow2'])
49
+iotests.verify_platform(['linux'])
50
+
51
+with iotests.FilePath('disk0.img') as disk0_img_path, \
52
+ iotests.FilePath('disk1.img') as disk1_img_path, \
53
+ iotests.VM() as vm:
54
+
55
+ img_size = '10M'
56
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk0_img_path, img_size)
57
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk1_img_path, img_size)
58
+
59
+ iotests.log('Launching VM...')
60
+ (vm.add_object('iothread,id=iothread0')
61
+ .add_drive(disk0_img_path, 'node-name=drive0-node', interface='none')
62
+ .add_drive(disk1_img_path, 'node-name=drive1-node', interface='none')
63
+ .launch())
64
+
65
+ iotests.log('Setting IOThreads...')
66
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
67
+ node_name='drive0-node', iothread='iothread0',
68
+ force=True))
69
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
70
+ node_name='drive1-node', iothread='iothread0',
71
+ force=True))
72
+
73
+ iotests.log('Starting migration...')
74
+ iotests.log(vm.qmp('migrate', uri='exec:cat >/dev/null'))
75
+ while True:
76
+ vm.get_qmp_event(wait=60.0)
77
+ result = vm.qmp('query-migrate')
78
+ status = result.get('return', {}).get('status', None)
79
+ if status == 'completed':
80
+ break
81
diff --git a/tests/qemu-iotests/203.out b/tests/qemu-iotests/203.out
82
new file mode 100644
83
index XXXXXXX..XXXXXXX
84
--- /dev/null
85
+++ b/tests/qemu-iotests/203.out
86
@@ -XXX,XX +XXX,XX @@
87
+Launching VM...
88
+Setting IOThreads...
89
+{u'return': {}}
90
+{u'return': {}}
91
+Starting migration...
92
+{u'return': {}}
93
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
24
index XXXXXXX..XXXXXXX 100644
94
index XXXXXXX..XXXXXXX 100644
25
--- a/block/block-backend.c
95
--- a/tests/qemu-iotests/group
26
+++ b/block/block-backend.c
96
+++ b/tests/qemu-iotests/group
27
@@ -XXX,XX +XXX,XX @@ int blk_get_refcnt(BlockBackend *blk)
97
@@ -XXX,XX +XXX,XX @@
28
*/
98
198 rw auto
29
void blk_ref(BlockBackend *blk)
99
200 rw auto
30
{
100
202 rw auto quick
31
+ assert(blk->refcnt > 0);
101
+203 rw auto
32
blk->refcnt++;
33
}
34
35
@@ -XXX,XX +XXX,XX @@ void blk_unref(BlockBackend *blk)
36
{
37
if (blk) {
38
assert(blk->refcnt > 0);
39
- if (!--blk->refcnt) {
40
+ if (blk->refcnt > 1) {
41
+ blk->refcnt--;
42
+ } else {
43
+ blk_drain(blk);
44
+ /* blk_drain() cannot resurrect blk, nobody held a reference */
45
+ assert(blk->refcnt == 1);
46
+ blk->refcnt = 0;
47
blk_delete(blk);
48
}
49
}
50
--
102
--
51
2.17.1
103
2.14.3
52
104
53
105