The following changes since commit f1d33f55c47dfdaf8daacd618588ad3ae4c452d1:

  Merge tag 'pull-testing-gdbstub-plugins-gitdm-061022-3' of https://github.com/stsquad/qemu into staging (2022-10-06 07:11:56 -0400)

are available in the Git repository at:

  git://repo.or.cz/qemu/kevin.git tags/for-upstream

for you to fetch changes up to a7ca2eb488ff149c898f43abe103f8bd8e3ca3c4:

  file-posix: Remove unused s->discard_zeroes (2022-10-07 12:11:41 +0200)

----------------------------------------------------------------
Block layer patches

- job: replace AioContext lock with job_mutex
- Fixes to make coroutine_fn annotations more accurate
- QAPI schema: Fix incorrect example
- Code cleanup

----------------------------------------------------------------
Alberto Faria (1):
      coroutine: Drop coroutine_fn annotation from qemu_coroutine_self()

Emanuele Giuseppe Esposito (20):
      job.c: make job_mutex and job_lock/unlock() public
      job.h: categorize fields in struct Job
      job.c: API functions not used outside should be static
      aio-wait.h: introduce AIO_WAIT_WHILE_UNLOCKED
      job.c: add job_lock/unlock while keeping job.h intact
      job: move and update comments from blockjob.c
      blockjob: introduce block_job _locked() APIs
      jobs: add job lock in find_* functions
      jobs: use job locks also in the unit tests
      block/mirror.c: use of job helpers in drivers
      jobs: group together API calls under the same job lock
      jobs: protect job.aio_context with BQL and job_mutex
      blockjob.h: categorize fields in struct BlockJob
      blockjob: rename notifier callbacks as _locked
      blockjob: protect iostatus field in BlockJob struct
      job.h: categorize JobDriver callbacks that need the AioContext lock
      job.c: enable job lock/unlock and remove Aiocontext locks
      block_job_query: remove atomic read
      blockjob: remove unused functions
      job: remove unused functions

Kevin Wolf (2):
      quorum: Remove unnecessary forward declaration
      file-posix: Remove unused s->discard_zeroes

Marc-André Lureau (3):
      9p: add missing coroutine_fn annotations
      migration: add missing coroutine_fn annotations
      test-coroutine: add missing coroutine_fn annotations

Markus Armbruster (1):
      Revert "qapi: fix examples of blockdev-add with qcow2"

Paolo Bonzini (23):
      block/nvme: separate nvme_get_free_req cases for coroutine/non-coroutine context
      block: add missing coroutine_fn annotations
      qcow2: remove incorrect coroutine_fn annotations
      nbd: remove incorrect coroutine_fn annotations
      coroutine: remove incorrect coroutine_fn annotations
      blkverify: add missing coroutine_fn annotations
      file-posix: add missing coroutine_fn annotations
      iscsi: add missing coroutine_fn annotations
      nbd: add missing coroutine_fn annotations
      nfs: add missing coroutine_fn annotations
      nvme: add missing coroutine_fn annotations
      parallels: add missing coroutine_fn annotations
      qcow2: add missing coroutine_fn annotations
      copy-before-write: add missing coroutine_fn annotations
      curl: add missing coroutine_fn annotations
      qed: add missing coroutine_fn annotations
      quorum: add missing coroutine_fn annotations
      throttle: add missing coroutine_fn annotations
      vmdk: add missing coroutine_fn annotations
      job: add missing coroutine_fn annotations
      coroutine-lock: add missing coroutine_fn annotations
      raw-format: add missing coroutine_fn annotations
      job: detect change of aiocontext within job coroutine

 qapi/block-core.json             |  10 +-
 block/qcow2.h                    |  19 +-
 hw/9pfs/9p.h                     |   9 +-
 include/block/aio-wait.h         |  17 +-
 include/block/blockjob.h         |  59 +++-
 include/block/nbd.h              |   2 +-
 include/qemu/coroutine.h         |   4 +-
 include/qemu/job.h               | 306 +++++++++++++-----
 block.c                          |  24 +-
 block/blkverify.c                |   2 +-
 block/block-backend.c            |  10 +-
 block/copy-before-write.c        |   9 +-
 block/curl.c                     |   2 +-
 block/file-posix.c               |  11 +-
 block/io.c                       |  22 +-
 block/iscsi.c                    |   3 +-
 block/mirror.c                   |  19 +-
 block/nbd.c                      |  11 +-
 block/nfs.c                      |   2 +-
 block/nvme.c                     |  54 ++--
 block/parallels.c                |   5 +-
 block/qcow2-cluster.c            |  21 +-
 block/qcow2-refcount.c           |   6 +-
 block/qcow2.c                    |   5 +-
 block/qed.c                      |   4 +-
 block/quorum.c                   |  38 +--
 block/raw-format.c               |   3 +-
 block/replication.c              |   3 +
 block/throttle.c                 |   2 +-
 block/vmdk.c                     |  22 +-
 blockdev.c                       | 129 ++++----
 blockjob.c                       | 132 ++++----
 job-qmp.c                        |  92 +++---
 job.c                            | 674 +++++++++++++++++++++++++--------------
 migration/migration.c            |   3 +-
 monitor/qmp-cmds.c               |   7 +-
 qemu-img.c                       |  17 +-
 tests/unit/test-bdrv-drain.c     |  80 +++--
 tests/unit/test-block-iothread.c |   8 +-
 tests/unit/test-blockjob-txn.c   |  24 +-
 tests/unit/test-blockjob.c       | 136 ++++----
 tests/unit/test-coroutine.c      |   2 +-
 util/qemu-coroutine-lock.c       |  14 +-
 util/qemu-coroutine.c            |   2 +-
 44 files changed, 1237 insertions(+), 787 deletions(-)

From: Markus Armbruster <armbru@redhat.com>

This reverts commit b6522938327141235b97ab38e40c6c4512587373.

Kevin Wolf NAKed this patch, because:

'file' is a required member (defined in BlockdevOptionsGenericFormat),
removing it makes the example invalid. 'data-file' is only an additional
optional member to be used for external data files (i.e. when the guest
data is kept separate from the metadata in the .qcow2 file).

However, it had already been merged then. Revert.

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20220930171908.846769-1-armbru@redhat.com>
Reviewed-by: Victor Toso <victortoso@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qapi/block-core.json | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -XXX,XX +XXX,XX @@
 # -> { "execute": "blockdev-add",
 #      "arguments": { "driver": "qcow2",
 #                     "node-name": "node1534",
-#                     "data-file": { "driver": "file",
-#                                    "filename": "hd1.qcow2" },
+#                     "file": { "driver": "file",
+#                               "filename": "hd1.qcow2" },
 #                     "backing": null } }
 #
 # <- { "return": {} }
@@ -XXX,XX +XXX,XX @@
 #      "arguments": {
 #          "driver": "qcow2",
 #          "node-name": "test1",
-#          "data-file": {
+#          "file": {
 #              "driver": "file",
 #              "filename": "test.qcow2"
 #          }
@@ -XXX,XX +XXX,XX @@
 #          "cache": {
 #              "direct": true
 #          },
-#          "data-file": {
+#          "file": {
 #              "driver": "file",
 #              "filename": "/tmp/test.qcow2"
 #          },
@@ -XXX,XX +XXX,XX @@
 #      "arguments": {
 #          "driver": "qcow2",
 #          "node-name": "node0",
-#          "data-file": {
+#          "file": {
 #              "driver": "file",
 #              "filename": "test.qcow2"
 #          }
--
2.37.3

From: Alberto Faria <afaria@redhat.com>

qemu_coroutine_self() can be called from outside coroutine context,
returning the leader coroutine, and several such invocations currently
exist (mostly in qcow2 tracing calls).
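
To illustrate, a minimal sketch (the helper below is hypothetical, not
part of this patch): with coroutine_fn gone from the prototype, a call
like this is legitimate in either context.

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Hypothetical tracing-style helper.  Outside a coroutine,
     * qemu_coroutine_self() returns the per-thread leader coroutine
     * instead of being a contract violation, so no guard is needed. */
    static void example_trace_current_co(void)
    {
        Coroutine *self = qemu_coroutine_self();
        (void)self;  /* e.g. passed to a trace event */
    }
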
Signed-off-by: Alberto Faria <afaria@redhat.com>
Message-Id: <20221005175209.975797-1-afaria@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/coroutine.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
 /**
  * Get the currently executing coroutine
  */
-Coroutine *coroutine_fn qemu_coroutine_self(void);
+Coroutine *qemu_coroutine_self(void);
 
 /**
  * Return whether or not currently inside a coroutine
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

nvme_get_free_req has very different semantics when called in
coroutine context (where it waits) and in non-coroutine context
(where it doesn't). Split the two cases to make it clear what
is being requested.
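
A minimal sketch of the resulting calling convention (the two callers
below are hypothetical; the helpers are the ones introduced by this
patch, inside block/nvme.c):

    /* Non-coroutine context, e.g. a synchronous admin command: fail fast. */
    static int example_submit_sync(NVMeQueuePair *q)
    {
        NVMeRequest *req = nvme_get_free_req_nowait(q);
        if (!req) {
            return -EBUSY;  /* no free slot; the caller must retry later */
        }
        /* ... fill in the command and kick the queue ... */
        return 0;
    }

    /* Coroutine context: yield until a slot becomes available. */
    static coroutine_fn int example_submit_co(NVMeQueuePair *q)
    {
        NVMeRequest *req = nvme_get_free_req(q);  /* may yield, never NULL */
        /* ... fill in the command and kick the queue ... */
        (void)req;
        return 0;
    }
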
Cc: qemu-block@nongnu.org
Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-2-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/nvme.c | 48 ++++++++++++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 20 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static void nvme_kick(NVMeQueuePair *q)
     q->need_kick = 0;
 }
 
-/* Find a free request element if any, otherwise:
- * a) if in coroutine context, try to wait for one to become available;
- * b) if not in coroutine, return NULL;
- */
-static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+static NVMeRequest *nvme_get_free_req_nofail_locked(NVMeQueuePair *q)
 {
     NVMeRequest *req;
 
-    qemu_mutex_lock(&q->lock);
-
-    while (q->free_req_head == -1) {
-        if (qemu_in_coroutine()) {
-            trace_nvme_free_req_queue_wait(q->s, q->index);
-            qemu_co_queue_wait(&q->free_req_queue, &q->lock);
-        } else {
-            qemu_mutex_unlock(&q->lock);
-            return NULL;
-        }
-    }
-
     req = &q->reqs[q->free_req_head];
     q->free_req_head = req->free_req_next;
     req->free_req_next = -1;
-
-    qemu_mutex_unlock(&q->lock);
     return req;
 }
 
+/* Return a free request element if any, otherwise return NULL. */
+static NVMeRequest *nvme_get_free_req_nowait(NVMeQueuePair *q)
+{
+    QEMU_LOCK_GUARD(&q->lock);
+    if (q->free_req_head == -1) {
+        return NULL;
+    }
+    return nvme_get_free_req_nofail_locked(q);
+}
+
+/*
+ * Wait for a free request to become available if necessary, then
+ * return it.
+ */
+static coroutine_fn NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
+{
+    QEMU_LOCK_GUARD(&q->lock);
+
+    while (q->free_req_head == -1) {
+        trace_nvme_free_req_queue_wait(q->s, q->index);
+        qemu_co_queue_wait(&q->free_req_queue, &q->lock);
+    }
+
+    return nvme_get_free_req_nofail_locked(q);
+}
+
 /* With q->lock */
 static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
 {
@@ -XXX,XX +XXX,XX @@ static int nvme_admin_cmd_sync(BlockDriverState *bs, NvmeCmd *cmd)
     AioContext *aio_context = bdrv_get_aio_context(bs);
     NVMeRequest *req;
     int ret = -EINPROGRESS;
-    req = nvme_get_free_req(q);
+    req = nvme_get_free_req_nowait(q);
     if (!req) {
         return -EBUSY;
     }
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.
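
A minimal sketch of that rule, with hypothetical functions (not from
this patch):

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* May yield, so it must only run in coroutine context. */
    static int coroutine_fn example_co_op(void)
    {
        qemu_coroutine_yield();  /* stands in for I/O that can yield */
        return 0;
    }

    /* A mixed-context caller must guard the direct call. */
    static int example_op(void)
    {
        if (qemu_in_coroutine()) {
            return example_co_op();  /* safe: already in a coroutine */
        }
        /* Otherwise a coroutine would have to be created and entered,
         * e.g. via qemu_coroutine_create() + qemu_coroutine_enter(). */
        return -ENOTSUP;
    }
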
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-3-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block.c               |  7 ++++---
 block/block-backend.c | 10 +++++-----
 block/io.c            | 22 +++++++++++-----------
 3 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ static int64_t create_file_fallback_truncate(BlockBackend *blk,
  * Helper function for bdrv_create_file_fallback(): Zero the first
  * sector to remove any potentially pre-existing image header.
  */
-static int create_file_fallback_zero_first_sector(BlockBackend *blk,
-                                                  int64_t current_size,
-                                                  Error **errp)
+static int coroutine_fn
+create_file_fallback_zero_first_sector(BlockBackend *blk,
+                                       int64_t current_size,
+                                       Error **errp)
 {
     int64_t bytes_to_clear;
     int ret;
diff --git a/block/block-backend.c b/block/block-backend.c
index XXXXXXX..XXXXXXX 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -XXX,XX +XXX,XX @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset,
     return &acb->common;
 }
 
-static void blk_aio_read_entry(void *opaque)
+static void coroutine_fn blk_aio_read_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ static void blk_aio_read_entry(void *opaque)
     blk_aio_complete(acb);
 }
 
-static void blk_aio_write_entry(void *opaque)
+static void coroutine_fn blk_aio_write_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ int coroutine_fn blk_co_ioctl(BlockBackend *blk, unsigned long int req,
     return ret;
 }
 
-static void blk_aio_ioctl_entry(void *opaque)
+static void coroutine_fn blk_aio_ioctl_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes)
     return bdrv_co_pdiscard(blk->root, offset, bytes);
 }
 
-static void blk_aio_pdiscard_entry(void *opaque)
+static void coroutine_fn blk_aio_pdiscard_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn blk_co_do_flush(BlockBackend *blk)
     return bdrv_co_flush(blk_bs(blk));
 }
 
-static void blk_aio_flush_entry(void *opaque)
+static void coroutine_fn blk_aio_flush_entry(void *opaque)
 {
     BlkAioEmAIOCB *acb = opaque;
     BlkRwCo *rwco = &acb->rwco;
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
 /**
  * Add an active request to the tracked requests list
  */
-static void tracked_request_begin(BdrvTrackedRequest *req,
-                                  BlockDriverState *bs,
-                                  int64_t offset,
-                                  int64_t bytes,
-                                  enum BdrvTrackedRequestType type)
+static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
+                                               BlockDriverState *bs,
+                                               int64_t offset,
+                                               int64_t bytes,
+                                               enum BdrvTrackedRequestType type)
 {
     bdrv_check_request(offset, bytes, &error_abort);
 
@@ -XXX,XX +XXX,XX @@ static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 }
 
 /* Called with self->bs->reqs_lock held */
-static BdrvTrackedRequest *
+static coroutine_fn BdrvTrackedRequest *
 bdrv_find_conflicting_request(BdrvTrackedRequest *self)
 {
     BdrvTrackedRequest *req;
@@ -XXX,XX +XXX,XX @@ static bool bdrv_init_padding(BlockDriverState *bs,
     return true;
 }
 
-static int bdrv_padding_rmw_read(BdrvChild *child,
-                                 BdrvTrackedRequest *req,
-                                 BdrvRequestPadding *pad,
-                                 bool zero_middle)
+static coroutine_fn int bdrv_padding_rmw_read(BdrvChild *child,
+                                              BdrvTrackedRequest *req,
+                                              BdrvRequestPadding *pad,
+                                              bool zero_middle)
 {
     QEMUIOVector local_qiov;
     BlockDriverState *bs = child->bs;
@@ -XXX,XX +XXX,XX @@ out:
     return ret;
 }
 
-int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
+int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
 {
     BlockDriver *drv = bs->drv;
     CoroutineIOCompletion co = {
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

This is incorrect because qcow2_mark_clean() calls qcow2_flush_caches().
qcow2_mark_clean() is called from non-coroutine context in
qcow2_inactivate() and qcow2_amend_options().
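
The offending call chain, spelled out (simplified from the description
above):

    qcow2_inactivate() / qcow2_amend_options()   /* non-coroutine context */
        -> qcow2_mark_clean()                    /* non-coroutine */
            -> qcow2_flush_caches()              /* must therefore not be
                                                    coroutine_fn */
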
Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-4-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qcow2.h          | 4 ++--
 block/qcow2-refcount.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/block/qcow2.h b/block/qcow2.h
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -XXX,XX +XXX,XX @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
 int qcow2_update_snapshot_refcount(BlockDriverState *bs,
                                    int64_t l1_table_offset, int l1_size, int addend);
 
-int coroutine_fn qcow2_flush_caches(BlockDriverState *bs);
-int coroutine_fn qcow2_write_caches(BlockDriverState *bs);
+int qcow2_flush_caches(BlockDriverState *bs);
+int qcow2_write_caches(BlockDriverState *bs);
 int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix);
 
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -XXX,XX +XXX,XX @@ void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
     }
 }
 
-int coroutine_fn qcow2_write_caches(BlockDriverState *bs)
+int qcow2_write_caches(BlockDriverState *bs)
 {
     BDRVQcow2State *s = bs->opaque;
     int ret;
@@ -XXX,XX +XXX,XX @@ int coroutine_fn qcow2_write_caches(BlockDriverState *bs)
     return 0;
 }
 
-int coroutine_fn qcow2_flush_caches(BlockDriverState *bs)
+int qcow2_flush_caches(BlockDriverState *bs)
 {
     int ret = qcow2_write_caches(bs);
     if (ret < 0) {
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

nbd_co_establish_connection_cancel() cancels a coroutine but is not called
from coroutine context itself, for example in nbd_cancel_in_flight()
and in timer callbacks reconnect_delay_timer_cb() and open_timer_cb().

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-5-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/nbd.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/block/nbd.h b/include/block/nbd.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -XXX,XX +XXX,XX @@ QIOChannel *coroutine_fn
 nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
                             bool blocking, Error **errp);
 
-void coroutine_fn nbd_co_establish_connection_cancel(NBDClientConnection *conn);
+void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
 
 #endif
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

qemu_coroutine_get_aio_context inspects a coroutine, but it does
not have to be called from the coroutine itself (or from any
coroutine).

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-6-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/coroutine.h | 2 +-
 util/qemu-coroutine.c    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_coroutine_yield(void);
 /**
  * Get the AioContext of the given coroutine
  */
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co);
 
 /**
  * Get the currently executing coroutine
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -XXX,XX +XXX,XX @@ bool qemu_coroutine_entered(Coroutine *co)
     return co->caller;
 }
 
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co)
 {
     return co->ctx;
 }
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-8-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/blkverify.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/blkverify.c b/block/blkverify.c
index XXXXXXX..XXXXXXX 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -XXX,XX +XXX,XX @@ blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
     return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true);
 }
 
-static int blkverify_co_flush(BlockDriverState *bs)
+static int coroutine_fn blkverify_co_flush(BlockDriverState *bs)
 {
     BDRVBlkverifyState *s = bs->opaque;
 
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-9-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/file-posix.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/file-posix.c b/block/file-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -XXX,XX +XXX,XX @@ static void raw_aio_unplug(BlockDriverState *bs)
 #endif
 }
 
-static int raw_co_flush_to_disk(BlockDriverState *bs)
+static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
 {
     BDRVRawState *s = bs->opaque;
     RawPosixAIOData acb;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-10-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/iscsi.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/block/iscsi.c b/block/iscsi.c
index XXXXXXX..XXXXXXX 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -XXX,XX +XXX,XX @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
     }
 }
 
-static void iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
+static void coroutine_fn
+iscsi_co_init_iscsitask(IscsiLun *iscsilun, struct IscsiTask *iTask)
 {
     *iTask = (struct IscsiTask) {
         .co = qemu_coroutine_self(),
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-11-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/nbd.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/block/nbd.c b/block/nbd.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -XXX,XX +XXX,XX @@ static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
  * nbd_reply_chunk_iter_receive
  * The pointer stored in @payload requires g_free() to free it.
  */
-static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
-                                         NBDReplyChunkIter *iter,
-                                         uint64_t handle,
-                                         QEMUIOVector *qiov, NBDReply *reply,
-                                         void **payload)
+static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
+                                                      NBDReplyChunkIter *iter,
+                                                      uint64_t handle,
+                                                      QEMUIOVector *qiov,
+                                                      NBDReply *reply,
+                                                      void **payload)
 {
     int ret, request_ret;
     NBDReply local_reply;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-12-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/nfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/nfs.c b/block/nfs.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -XXX,XX +XXX,XX @@ static void nfs_process_write(void *arg)
     qemu_mutex_unlock(&client->mutex);
 }
 
-static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
+static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
 {
     *task = (NFSRPC) {
         .co = qemu_coroutine_self(),
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-13-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/nvme.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
     return true;
 }
 
-static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                       QEMUIOVector *qiov, bool is_write, int flags)
+static coroutine_fn int nvme_co_prw(BlockDriverState *bs,
+                                    uint64_t offset, uint64_t bytes,
+                                    QEMUIOVector *qiov, bool is_write,
+                                    int flags)
 {
     BDRVNVMeState *s = bs->opaque;
     int r;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-14-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/parallels.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/block/parallels.c b/block/parallels.c
index XXXXXXX..XXXXXXX 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -XXX,XX +XXX,XX @@ static int64_t block_status(BDRVParallelsState *s, int64_t sector_num,
     return start_off;
 }
 
-static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
-                                 int nb_sectors, int *pnum)
+static coroutine_fn int64_t allocate_clusters(BlockDriverState *bs,
+                                              int64_t sector_num,
+                                              int nb_sectors, int *pnum)
 {
     int ret = 0;
     BDRVParallelsState *s = bs->opaque;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-15-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qcow2.h          | 15 ++++++++-------
 block/qcow2-cluster.c  | 21 ++++++++++++---------
 block/qcow2-refcount.c |  2 +-
 block/qcow2.c          |  5 +++--
 4 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/block/qcow2.h b/block/qcow2.h
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -XXX,XX +XXX,XX @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
                                 void *cb_opaque, Error **errp);
 int qcow2_shrink_reftable(BlockDriverState *bs);
 int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
-int qcow2_detect_metadata_preallocation(BlockDriverState *bs);
+int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs);
 
 /* qcow2-cluster.c functions */
 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
@@ -XXX,XX +XXX,XX @@ int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
 int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
                           unsigned int *bytes, uint64_t *host_offset,
                           QCow2SubclusterType *subcluster_type);
-int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
-                            unsigned int *bytes, uint64_t *host_offset,
-                            QCowL2Meta **m);
+int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
+                                         unsigned int *bytes,
+                                         uint64_t *host_offset, QCowL2Meta **m);
 int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                           uint64_t offset,
                                           int compressed_size,
@@ -XXX,XX +XXX,XX @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
 void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
                                      uint64_t *coffset, int *csize);
 
-int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
+int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs,
+                                             QCowL2Meta *m);
 void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                           uint64_t bytes, enum qcow2_discard_type type,
                           bool full_discard);
-int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
-                             uint64_t bytes, int flags);
+int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
+                                          uint64_t bytes, int flags);
 
 int qcow2_expand_zero_clusters(BlockDriverState *bs,
                                BlockDriverAmendStatusCB *status_cb,
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -XXX,XX +XXX,XX @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
     return 0;
 }
 
-static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
+static int coroutine_fn perform_cow(BlockDriverState *bs, QCowL2Meta *m)
 {
     BDRVQcow2State *s = bs->opaque;
     Qcow2COWRegion *start = &m->cow_start;
@@ -XXX,XX +XXX,XX @@ fail:
     return ret;
 }
 
-int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
+int coroutine_fn qcow2_alloc_cluster_link_l2(BlockDriverState *bs,
+                                             QCowL2Meta *m)
 {
     BDRVQcow2State *s = bs->opaque;
     int i, j = 0, l2_index, ret;
@@ -XXX,XX +XXX,XX @@ static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
  * information on cluster allocation may be invalid now. The caller
  * must start over anyway, so consider *cur_bytes undefined.
  */
-static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
-                               uint64_t *cur_bytes, QCowL2Meta **m)
+static int coroutine_fn handle_dependencies(BlockDriverState *bs,
+                                            uint64_t guest_offset,
+                                            uint64_t *cur_bytes, QCowL2Meta **m)
 {
     BDRVQcow2State *s = bs->opaque;
     QCowL2Meta *old_alloc;
@@ -XXX,XX +XXX,XX @@ out:
  *
  * Return 0 on success and -errno in error cases
  */
-int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
-                            unsigned int *bytes, uint64_t *host_offset,
-                            QCowL2Meta **m)
+int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
+                                         unsigned int *bytes,
+                                         uint64_t *host_offset,
+                                         QCowL2Meta **m)
 {
     BDRVQcow2State *s = bs->opaque;
     uint64_t start, remaining;
@@ -XXX,XX +XXX,XX @@ out:
     return ret;
 }
 
-int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
-                             uint64_t bytes, int flags)
+int coroutine_fn qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
+                                          uint64_t bytes, int flags)
 {
     BDRVQcow2State *s = bs->opaque;
     uint64_t end_offset = offset + bytes;
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -XXX,XX +XXX,XX @@ int64_t qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
     return -EIO;
 }
 
-int qcow2_detect_metadata_preallocation(BlockDriverState *bs)
+int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs)
 {
     BDRVQcow2State *s = bs->opaque;
     int64_t i, end_cluster, cluster_count = 0, threshold;
diff --git a/block/qcow2.c b/block/qcow2.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -XXX,XX +XXX,XX @@ static bool merge_cow(uint64_t offset, unsigned bytes,
  * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error.
  * Note that returning 0 does not guarantee non-zero data.
  */
-static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
+static int coroutine_fn is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
 {
     /*
      * This check is designed for optimization shortcut so it must be
@@ -XXX,XX +XXX,XX @@ static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
                                     m->cow_end.nb_bytes);
 }
 
-static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
+static int coroutine_fn handle_alloc_space(BlockDriverState *bs,
+                                           QCowL2Meta *l2meta)
 {
     BDRVQcow2State *s = bs->opaque;
     QCowL2Meta *m;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-16-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/copy-before-write.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/block/copy-before-write.c b/block/copy-before-write.c
index XXXXXXX..XXXXXXX 100644
--- a/block/copy-before-write.c
+++ b/block/copy-before-write.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn cbw_co_flush(BlockDriverState *bs)
  * It's guaranteed that guest writes will not interact in the region until
  * cbw_snapshot_read_unlock() called.
  */
-static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs,
-                                        int64_t offset, int64_t bytes,
-                                        int64_t *pnum, BdrvChild **file)
+static coroutine_fn BlockReq *
+cbw_snapshot_read_lock(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                       int64_t *pnum, BdrvChild **file)
 {
     BDRVCopyBeforeWriteState *s = bs->opaque;
     BlockReq *req = g_new(BlockReq, 1);
@@ -XXX,XX +XXX,XX @@ static BlockReq *cbw_snapshot_read_lock(BlockDriverState *bs,
     return req;
 }
 
-static void cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
+static coroutine_fn void
+cbw_snapshot_read_unlock(BlockDriverState *bs, BlockReq *req)
 {
     BDRVCopyBeforeWriteState *s = bs->opaque;
 
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-17-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/curl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/curl.c b/block/curl.c
index XXXXXXX..XXXXXXX 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -XXX,XX +XXX,XX @@ out_noclean:
     return -EINVAL;
 }
 
-static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
+static void coroutine_fn curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb)
 {
     CURLState *state;
     int running;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-18-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/qed.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/block/qed.c b/block/qed.c
index XXXXXXX..XXXXXXX 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -XXX,XX +XXX,XX @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
     return l2_table;
 }
 
-static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
+static bool coroutine_fn qed_plug_allocating_write_reqs(BDRVQEDState *s)
 {
     qemu_co_mutex_lock(&s->table_lock);
 
@@ -XXX,XX +XXX,XX @@ static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
     return true;
 }
 
-static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
+static void coroutine_fn qed_unplug_allocating_write_reqs(BDRVQEDState *s)
 {
     qemu_co_mutex_lock(&s->table_lock);
     assert(s->allocating_write_reqs_plugged);
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-19-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/quorum.c | 36 +++++++++++++++++++-----------------
 1 file changed, 19 insertions(+), 17 deletions(-)

diff --git a/block/quorum.c b/block/quorum.c
index XXXXXXX..XXXXXXX 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -XXX,XX +XXX,XX @@ static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b)
     return a->l == b->l;
 }
 
-static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs,
-                                   QEMUIOVector *qiov,
-                                   uint64_t offset,
-                                   uint64_t bytes,
-                                   int flags)
+static QuorumAIOCB *coroutine_fn quorum_aio_get(BlockDriverState *bs,
+                                                QEMUIOVector *qiov,
+                                                uint64_t offset, uint64_t bytes,
+                                                int flags)
 {
     BDRVQuorumState *s = bs->opaque;
     QuorumAIOCB *acb = g_new(QuorumAIOCB, 1);
@@ -XXX,XX +XXX,XX @@ static void quorum_report_bad_versions(BDRVQuorumState *s,
     }
 }
 
-static void quorum_rewrite_entry(void *opaque)
+static void coroutine_fn quorum_rewrite_entry(void *opaque)
 {
     QuorumCo *co = opaque;
     QuorumAIOCB *acb = co->acb;
@@ -XXX,XX +XXX,XX @@ free_exit:
     quorum_free_vote_list(&acb->votes);
 }
 
-static void read_quorum_children_entry(void *opaque)
+static void coroutine_fn read_quorum_children_entry(void *opaque)
 {
     QuorumCo *co = opaque;
     QuorumAIOCB *acb = co->acb;
@@ -XXX,XX +XXX,XX @@ static void read_quorum_children_entry(void *opaque)
     }
 }
 
-static int read_quorum_children(QuorumAIOCB *acb)
+static int coroutine_fn read_quorum_children(QuorumAIOCB *acb)
 {
     BDRVQuorumState *s = acb->bs->opaque;
     int i;
@@ -XXX,XX +XXX,XX @@ static int read_quorum_children(QuorumAIOCB *acb)
     return acb->vote_ret;
 }
 
-static int read_fifo_child(QuorumAIOCB *acb)
+static int coroutine_fn read_fifo_child(QuorumAIOCB *acb)
 {
     BDRVQuorumState *s = acb->bs->opaque;
     int n, ret;
@@ -XXX,XX +XXX,XX @@ static int read_fifo_child(QuorumAIOCB *acb)
     return ret;
 }
 
-static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
-                            QEMUIOVector *qiov, BdrvRequestFlags flags)
+static int coroutine_fn quorum_co_preadv(BlockDriverState *bs,
+                                         int64_t offset, int64_t bytes,
+                                         QEMUIOVector *qiov,
+                                         BdrvRequestFlags flags)
 {
     BDRVQuorumState *s = bs->opaque;
     QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
@@ -XXX,XX +XXX,XX @@ static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
     return ret;
 }
 
-static void write_quorum_entry(void *opaque)
+static void coroutine_fn write_quorum_entry(void *opaque)
 {
     QuorumCo *co = opaque;
     QuorumAIOCB *acb = co->acb;
@@ -XXX,XX +XXX,XX @@ static void write_quorum_entry(void *opaque)
     }
 }
 
-static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
-                             int64_t bytes, QEMUIOVector *qiov,
-                             BdrvRequestFlags flags)
+static int coroutine_fn quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
+                                          int64_t bytes, QEMUIOVector *qiov,
+                                          BdrvRequestFlags flags)
 {
     BDRVQuorumState *s = bs->opaque;
     QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);
@@ -XXX,XX +XXX,XX @@ static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
     return ret;
 }
 
-static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
-                                   int64_t bytes, BdrvRequestFlags flags)
+static int coroutine_fn quorum_co_pwrite_zeroes(BlockDriverState *bs,
+                                                int64_t offset, int64_t bytes,
+                                                BdrvRequestFlags flags)
 
 {
     return quorum_co_pwritev(bs, offset, bytes, NULL,
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-20-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/throttle.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/throttle.c b/block/throttle.c
index XXXXXXX..XXXXXXX 100644
--- a/block/throttle.c
+++ b/block/throttle.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs,
                                            BDRV_REQ_WRITE_COMPRESSED);
 }
 
-static int throttle_co_flush(BlockDriverState *bs)
+static int coroutine_fn throttle_co_flush(BlockDriverState *bs)
 {
     return bdrv_co_flush(bs->file->bs);
 }
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-21-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/vmdk.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/block/vmdk.c b/block/vmdk.c
index XXXXXXX..XXXXXXX 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn vmdk_co_block_status(BlockDriverState *bs,
     return ret;
 }
 
-static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
-                             int64_t offset_in_cluster, QEMUIOVector *qiov,
-                             uint64_t qiov_offset, uint64_t n_bytes,
-                             uint64_t offset)
+static int coroutine_fn
+vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
+                  int64_t offset_in_cluster, QEMUIOVector *qiov,
+                  uint64_t qiov_offset, uint64_t n_bytes,
+                  uint64_t offset)
 {
     int ret;
     VmdkGrainMarker *data = NULL;
@@ -XXX,XX +XXX,XX @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
     return ret;
 }
 
-static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
-                            int64_t offset_in_cluster, QEMUIOVector *qiov,
-                            int bytes)
+static int coroutine_fn
+vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
+                 int64_t offset_in_cluster, QEMUIOVector *qiov,
+                 int bytes)
 {
     int ret;
     int cluster_bytes, buf_bytes;
@@ -XXX,XX +XXX,XX @@ fail:
  *
  * Returns: error code with 0 for success.
  */
-static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
-                        uint64_t bytes, QEMUIOVector *qiov,
-                        bool zeroed, bool zero_dry_run)
+static int coroutine_fn vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
+                                     uint64_t bytes, QEMUIOVector *qiov,
+                                     bool zeroed, bool zero_dry_run)
 {
     BDRVVmdkState *s = bs->opaque;
     VmdkExtent *extent = NULL;
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-22-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/job.h | 2 +-
 job.c              | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/qemu/job.h b/include/qemu/job.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job);
  *
  * Yield the job coroutine.
  */
-void job_yield(Job *job);
+void coroutine_fn job_yield(Job *job);
 
 /**
  * @job: The job that calls the function.
diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job)
     }
 }
 
-void job_yield(Job *job)
+void coroutine_fn job_yield(Job *job)
 {
     assert(job->busy);
 
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-23-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 util/qemu-coroutine-lock.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -XXX,XX +XXX,XX @@ typedef struct CoWaitRecord {
     QSLIST_ENTRY(CoWaitRecord) next;
 } CoWaitRecord;
 
-static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
+static void coroutine_fn push_waiter(CoMutex *mutex, CoWaitRecord *w)
 {
     w->co = qemu_coroutine_self();
     QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_init(CoRwlock *lock)
 }
 
 /* Releases the internal CoMutex. */
-static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
+static void coroutine_fn qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
 {
     CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
     Coroutine *co = NULL;
@@ -XXX,XX +XXX,XX @@ static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
     }
 }
 
-void qemu_co_rwlock_rdlock(CoRwlock *lock)
+void coroutine_fn qemu_co_rwlock_rdlock(CoRwlock *lock)
 {
     Coroutine *self = qemu_coroutine_self();
 
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_rdlock(CoRwlock *lock)
     self->locks_held++;
 }
 
-void qemu_co_rwlock_unlock(CoRwlock *lock)
+void coroutine_fn qemu_co_rwlock_unlock(CoRwlock *lock)
 {
     Coroutine *self = qemu_coroutine_self();
 
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
     qemu_co_rwlock_maybe_wake_one(lock);
 }
 
-void qemu_co_rwlock_downgrade(CoRwlock *lock)
+void coroutine_fn qemu_co_rwlock_downgrade(CoRwlock *lock)
 {
     qemu_co_mutex_lock(&lock->mutex);
     assert(lock->owners == -1);
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_downgrade(CoRwlock *lock)
     qemu_co_rwlock_maybe_wake_one(lock);
 }
 
-void qemu_co_rwlock_wrlock(CoRwlock *lock)
+void coroutine_fn qemu_co_rwlock_wrlock(CoRwlock *lock)
 {
     Coroutine *self = qemu_coroutine_self();
 
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock)
     self->locks_held++;
 }
 
-void qemu_co_rwlock_upgrade(CoRwlock *lock)
+void coroutine_fn qemu_co_rwlock_upgrade(CoRwlock *lock)
 {
     qemu_co_mutex_lock(&lock->mutex);
     assert(lock->owners > 0);
--
2.37.3

From: Paolo Bonzini <pbonzini@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-24-pbonzini@redhat.com>
[kwolf: Fixed up coding style]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/raw-format.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/block/raw-format.c b/block/raw-format.c
index XXXXXXX..XXXXXXX 100644
--- a/block/raw-format.c
+++ b/block/raw-format.c
@@ -XXX,XX +XXX,XX @@ static void raw_lock_medium(BlockDriverState *bs, bool locked)
     bdrv_lock_medium(bs->file->bs, locked);
 }
 
-static int raw_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
+static int coroutine_fn raw_co_ioctl(BlockDriverState *bs,
+                                     unsigned long int req, void *buf)
 {
     BDRVRawState *s = bs->opaque;
     if (s->offset || s->has_size) {
--
2.37.3

From: Marc-André Lureau <marcandre.lureau@redhat.com>

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Acked-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Alberto Faria <afaria@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220922084924.201610-25-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 hw/9pfs/9p.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index XXXXXXX..XXXXXXX 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -XXX,XX +XXX,XX @@ typedef struct V9fsGetlock
 extern int open_fd_hw;
 extern int total_open_fd;
 
-static inline void v9fs_path_write_lock(V9fsState *s)
+static inline void coroutine_fn
+v9fs_path_write_lock(V9fsState *s)
 {
     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
         qemu_co_rwlock_wrlock(&s->rename_lock);
     }
 }
 
-static inline void v9fs_path_read_lock(V9fsState *s)
+static inline void coroutine_fn
+v9fs_path_read_lock(V9fsState *s)
 {
     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
         qemu_co_rwlock_rdlock(&s->rename_lock);
     }
 }
 
-static inline void v9fs_path_unlock(V9fsState *s)
+static inline void coroutine_fn
+v9fs_path_unlock(V9fsState *s)
 {
     if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
         qemu_co_rwlock_unlock(&s->rename_lock);
--
2.37.3

Deleted patch
1
From: Marc-André Lureau <marcandre.lureau@redhat.com>
2
1
3
Callers of coroutine_fn must be coroutine_fn themselves, or the call
4
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
5
functions where this holds.
6
7
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
8
Reviewed-by: Juan Quintela <quintela@redhat.com>
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Reviewed-by: Alberto Faria <afaria@redhat.com>
11
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
12
Message-Id: <20220922084924.201610-26-pbonzini@redhat.com>
13
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
16
migration/migration.c | 3 ++-
17
1 file changed, 2 insertions(+), 1 deletion(-)
18
19
diff --git a/migration/migration.c b/migration/migration.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/migration/migration.c
22
+++ b/migration/migration.c
23
@@ -XXX,XX +XXX,XX @@ static void process_incoming_migration_bh(void *opaque)
24
migration_incoming_state_destroy();
25
}
26
27
-static void process_incoming_migration_co(void *opaque)
28
+static void coroutine_fn
29
+process_incoming_migration_co(void *opaque)
30
{
31
MigrationIncomingState *mis = migration_incoming_get_current();
32
PostcopyState ps;
33
--
34
2.37.3
35
36
Deleted patch
1
From: Marc-André Lureau <marcandre.lureau@redhat.com>
2
1
3
Callers of coroutine_fn must be coroutine_fn themselves, or the call
4
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
5
functions where this holds.
6
7
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
8
Reviewed-by: Alberto Faria <afaria@redhat.com>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
Message-Id: <20220922084924.201610-27-pbonzini@redhat.com>
11
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
12
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
13
---
14
tests/unit/test-coroutine.c | 2 +-
15
1 file changed, 1 insertion(+), 1 deletion(-)
16
17
diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/tests/unit/test-coroutine.c
20
+++ b/tests/unit/test-coroutine.c
21
@@ -XXX,XX +XXX,XX @@ static void perf_baseline(void)
22
g_test_message("Function call %u iterations: %f s", maxcycles, duration);
23
}
24
25
-static __attribute__((noinline)) void perf_cost_func(void *opaque)
26
+static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque)
27
{
28
qemu_coroutine_yield();
29
}
30
--
31
2.37.3
32
33
Deleted patch
1
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2
Message-Id: <20221006122607.162769-1-kwolf@redhat.com>
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
6
---
7
block/quorum.c | 2 --
8
1 file changed, 2 deletions(-)
9
1
10
diff --git a/block/quorum.c b/block/quorum.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/block/quorum.c
13
+++ b/block/quorum.c
14
@@ -XXX,XX +XXX,XX @@ static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
15
return false;
16
}
17
18
-static int read_fifo_child(QuorumAIOCB *acb);
19
-
20
static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source)
21
{
22
int i;
23
--
24
2.37.3
25
26
Deleted patch
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
job mutex will be used to protect the job struct elements and list,
4
replacing AioContext locks.
5
6
Right now, use a shared lock for all jobs, in order to keep things
7
simple. Once the AioContext lock is gone, we can introduce per-job
8
locks.
9
10
To simplify the switch from the AioContext lock to the job lock, introduce
11
*nop* lock/unlock functions and macros.
12
We want to always call job_lock/unlock outside the AioContext locks,
13
and not vice-versa, otherwise we might get a deadlock. This is not
14
straightforward to do, and that's why we start with nop functions.
15
Once everything is protected by job_lock/unlock, we can change the nop into
16
an actual mutex and remove the AioContext lock.
17
18
Since job_mutex is already being used, add static
19
real_job_{lock/unlock} for the existing usage.
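
As a sketch of where this is heading (hypothetical caller, and
job_next_locked() only appears later in this series), monitor-side code
would eventually do:

    void example_query_jobs(void)
    {
        JOB_LOCK_GUARD();   /* nop in this patch, a real mutex later */

        for (Job *job = job_next_locked(NULL); job;
             job = job_next_locked(job)) {
            /* job fields may be read here while the lock is held */
        }
    }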
20
21
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
22
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
23
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
24
Message-Id: <20220926093214.506243-2-eesposit@redhat.com>
25
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
26
---
27
include/qemu/job.h | 24 ++++++++++++++++++++++++
28
job.c | 35 +++++++++++++++++++++++------------
29
2 files changed, 47 insertions(+), 12 deletions(-)
30
31
diff --git a/include/qemu/job.h b/include/qemu/job.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/include/qemu/job.h
34
+++ b/include/qemu/job.h
35
@@ -XXX,XX +XXX,XX @@ typedef enum JobCreateFlags {
36
JOB_MANUAL_DISMISS = 0x04,
37
} JobCreateFlags;
38
39
+extern QemuMutex job_mutex;
40
+
41
+#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */
42
+
43
+#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */
44
+
45
+/**
46
+ * job_lock:
47
+ *
48
+ * Take the mutex protecting the list of jobs and their status.
49
+ * Most functions called by the monitor need to call job_lock
50
+ * and job_unlock manually. On the other hand, function called
51
+ * by the block jobs themselves and by the block layer will take the
52
+ * lock for you.
53
+ */
54
+void job_lock(void);
55
+
56
+/**
57
+ * job_unlock:
58
+ *
59
+ * Release the mutex protecting the list of jobs and their status.
60
+ */
61
+void job_unlock(void);
62
+
63
/**
64
* Allocate and return a new job transaction. Jobs can be added to the
65
* transaction using job_txn_add_job().
66
diff --git a/job.c b/job.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/job.c
69
+++ b/job.c
70
@@ -XXX,XX +XXX,XX @@
71
#include "trace/trace-root.h"
72
#include "qapi/qapi-events-job.h"
73
74
+/*
75
+ * job_mutex protects the jobs list, but also makes the
76
+ * struct job fields thread-safe.
77
+ */
78
+QemuMutex job_mutex;
79
+
80
static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
81
82
/* Job State Transition Table */
83
@@ -XXX,XX +XXX,XX @@ struct JobTxn {
84
int refcnt;
85
};
86
87
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
88
- * and job->sleep_timer, such as concurrent calls to job_do_yield and
89
- * job_enter. */
90
-static QemuMutex job_mutex;
91
+void job_lock(void)
92
+{
93
+ /* nop */
94
+}
95
+
96
+void job_unlock(void)
97
+{
98
+ /* nop */
99
+}
100
101
-static void job_lock(void)
102
+static void real_job_lock(void)
103
{
104
qemu_mutex_lock(&job_mutex);
105
}
106
107
-static void job_unlock(void)
108
+static void real_job_unlock(void)
109
{
110
qemu_mutex_unlock(&job_mutex);
111
}
112
@@ -XXX,XX +XXX,XX @@ void job_enter_cond(Job *job, bool(*fn)(Job *job))
113
return;
114
}
115
116
- job_lock();
117
+ real_job_lock();
118
if (job->busy) {
119
- job_unlock();
120
+ real_job_unlock();
121
return;
122
}
123
124
if (fn && !fn(job)) {
125
- job_unlock();
126
+ real_job_unlock();
127
return;
128
}
129
130
assert(!job->deferred_to_main_loop);
131
timer_del(&job->sleep_timer);
132
job->busy = true;
133
- job_unlock();
134
+ real_job_unlock();
135
aio_co_enter(job->aio_context, job->co);
136
}
137
138
@@ -XXX,XX +XXX,XX @@ void job_enter(Job *job)
139
* called explicitly. */
140
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
141
{
142
- job_lock();
143
+ real_job_lock();
144
if (ns != -1) {
145
timer_mod(&job->sleep_timer, ns);
146
}
147
job->busy = false;
148
job_event_idle(job);
149
- job_unlock();
150
+ real_job_unlock();
151
qemu_coroutine_yield();
152
153
/* Set by job_enter_cond() before re-entering the coroutine. */
154
--
155
2.37.3
Deleted patch
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
Categorize the fields in struct Job to understand which ones
4
need to be protected by the job mutex and which don't.
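
Condensed from the diff below, the resulting layout is:

    typedef struct Job {
        /* Fields set at initialization (job_create), never modified */
        char *id;
        const JobDriver *driver;
        /* ... co, auto_finalize, auto_dismiss, cb, opaque ... */

        /* ProgressMeter API is thread-safe */
        ProgressMeter progress;

        /* Protected by AioContext lock */
        AioContext *aio_context;
        int refcnt;
        JobStatus status;
        /* ... sleep_timer and the remaining mutable state ... */
    } Job;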
5
6
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
7
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
8
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Message-Id: <20220926093214.506243-3-eesposit@redhat.com>
11
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
12
---
13
include/qemu/job.h | 61 +++++++++++++++++++++++++++-------------------
14
1 file changed, 36 insertions(+), 25 deletions(-)
15
16
diff --git a/include/qemu/job.h b/include/qemu/job.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/qemu/job.h
19
+++ b/include/qemu/job.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct JobTxn JobTxn;
21
* Long-running operation.
22
*/
23
typedef struct Job {
24
+
25
+ /* Fields set at initialization (job_create), and never modified */
26
+
27
/** The ID of the job. May be NULL for internal jobs. */
28
char *id;
29
30
- /** The type of this job. */
31
+ /**
32
+ * The type of this job.
33
+ * All callbacks are called with job_mutex *not* held.
34
+ */
35
const JobDriver *driver;
36
37
- /** Reference count of the block job */
38
- int refcnt;
39
-
40
- /** Current state; See @JobStatus for details. */
41
- JobStatus status;
42
-
43
- /** AioContext to run the job coroutine in */
44
- AioContext *aio_context;
45
-
46
/**
47
* The coroutine that executes the job. If not NULL, it is reentered when
48
* busy is false and the job is cancelled.
49
+ * Initialized in job_start()
50
*/
51
Coroutine *co;
52
53
+ /** True if this job should automatically finalize itself */
54
+ bool auto_finalize;
55
+
56
+ /** True if this job should automatically dismiss itself */
57
+ bool auto_dismiss;
58
+
59
+ /** The completion function that will be called when the job completes. */
60
+ BlockCompletionFunc *cb;
61
+
62
+ /** The opaque value that is passed to the completion function. */
63
+ void *opaque;
64
+
65
+ /* ProgressMeter API is thread-safe */
66
+ ProgressMeter progress;
67
+
68
+
69
+ /** Protected by AioContext lock */
70
+
71
+ /** AioContext to run the job coroutine in */
72
+ AioContext *aio_context;
73
+
74
+ /** Reference count of the block job */
75
+ int refcnt;
76
+
77
+ /** Current state; See @JobStatus for details. */
78
+ JobStatus status;
79
+
80
/**
81
* Timer that is used by @job_sleep_ns. Accessed under job_mutex (in
82
* job.c).
83
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
84
/** Set to true when the job has deferred work to the main loop. */
85
bool deferred_to_main_loop;
86
87
- /** True if this job should automatically finalize itself */
88
- bool auto_finalize;
89
-
90
- /** True if this job should automatically dismiss itself */
91
- bool auto_dismiss;
92
-
93
- ProgressMeter progress;
94
-
95
/**
96
* Return code from @run and/or @prepare callback(s).
97
* Not final until the job has reached the CONCLUDED status.
98
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
99
*/
100
Error *err;
101
102
- /** The completion function that will be called when the job completes. */
103
- BlockCompletionFunc *cb;
104
-
105
- /** The opaque value that is passed to the completion function. */
106
- void *opaque;
107
-
108
/** Notifiers called when a cancelled job is finalised */
109
NotifierList on_finalize_cancelled;
110
111
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
112
113
/**
114
* Callbacks and other information about a Job driver.
115
+ * All callbacks are invoked with job_mutex *not* held.
116
*/
117
struct JobDriver {
118
119
@@ -XXX,XX +XXX,XX @@ void coroutine_fn job_yield(Job *job);
120
*/
121
void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
122
123
-
124
/** Returns the JobType of a given Job. */
125
JobType job_type(const Job *job);
126
127
--
128
2.37.3
Deleted patch
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
job_event_* functions can all be static, as they are not used
4
outside job.c.
5
6
The same applies to job_txn_add_job().
7
8
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
11
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
12
Message-Id: <20220926093214.506243-4-eesposit@redhat.com>
13
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14
---
15
include/qemu/job.h | 18 ------------------
16
job.c | 22 +++++++++++++++++++---
17
2 files changed, 19 insertions(+), 21 deletions(-)
18
19
diff --git a/include/qemu/job.h b/include/qemu/job.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/qemu/job.h
22
+++ b/include/qemu/job.h
23
@@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void);
24
*/
25
void job_txn_unref(JobTxn *txn);
26
27
-/**
28
- * @txn: The transaction (may be NULL)
29
- * @job: Job to add to the transaction
30
- *
31
- * Add @job to the transaction. The @job must not already be in a transaction.
32
- * The caller must call either job_txn_unref() or job_completed() to release
33
- * the reference that is automatically grabbed here.
34
- *
35
- * If @txn is NULL, the function does nothing.
36
- */
37
-void job_txn_add_job(JobTxn *txn, Job *job);
38
-
39
/**
40
* Create a new long-running job and return it.
41
*
42
@@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining);
43
*/
44
void job_progress_increase_remaining(Job *job, uint64_t delta);
45
46
-/** To be called when a cancelled job is finalised. */
47
-void job_event_cancelled(Job *job);
48
-
49
-/** To be called when a successfully completed job is finalised. */
50
-void job_event_completed(Job *job);
51
-
52
/**
53
* Conditionally enter the job coroutine if the job is ready to run, not
54
* already busy and fn() returns true. fn() is called while under the job_lock
55
diff --git a/job.c b/job.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/job.c
58
+++ b/job.c
59
@@ -XXX,XX +XXX,XX @@ void job_txn_unref(JobTxn *txn)
60
}
61
}
62
63
-void job_txn_add_job(JobTxn *txn, Job *job)
64
+/**
65
+ * @txn: The transaction (may be NULL)
66
+ * @job: Job to add to the transaction
67
+ *
68
+ * Add @job to the transaction. The @job must not already be in a transaction.
69
+ * The caller must call either job_txn_unref() or job_completed() to release
70
+ * the reference that is automatically grabbed here.
71
+ *
72
+ * If @txn is NULL, the function does nothing.
73
+ */
74
+static void job_txn_add_job(JobTxn *txn, Job *job)
75
{
76
if (!txn) {
77
return;
78
@@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta)
79
progress_increase_remaining(&job->progress, delta);
80
}
81
82
-void job_event_cancelled(Job *job)
83
+/**
84
+ * To be called when a cancelled job is finalised.
85
+ */
86
+static void job_event_cancelled(Job *job)
87
{
88
notifier_list_notify(&job->on_finalize_cancelled, job);
89
}
90
91
-void job_event_completed(Job *job)
92
+/**
93
+ * To be called when a successfully completed job is finalised.
94
+ */
95
+static void job_event_completed(Job *job)
96
{
97
notifier_list_notify(&job->on_finalize_completed, job);
98
}
99
--
100
2.37.3
Deleted patch
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
Same as the AIO_WAIT_WHILE macro, but if we are in the main loop,
4
do not release and then re-acquire ctx_'s AioContext.
5
6
Once all AioContext locks go away, this macro will replace
7
AIO_WAIT_WHILE.
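
Usage sketch (ctx and the condition are made up for illustration):

    /* Main loop caller holding ctx's AioContext lock: the lock is
     * released and re-acquired around each poll iteration. */
    AIO_WAIT_WHILE(ctx, !done);

    /* Caller that does not hold the AioContext lock: poll without
     * releasing anything. */
    AIO_WAIT_WHILE_UNLOCKED(ctx, !done);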
8
9
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
10
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
11
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
12
Message-Id: <20220926093214.506243-5-eesposit@redhat.com>
13
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
14
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
15
---
16
include/block/aio-wait.h | 17 +++++++++++++----
17
1 file changed, 13 insertions(+), 4 deletions(-)
18
19
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/block/aio-wait.h
22
+++ b/include/block/aio-wait.h
23
@@ -XXX,XX +XXX,XX @@ typedef struct {
24
extern AioWait global_aio_wait;
25
26
/**
27
- * AIO_WAIT_WHILE:
28
+ * AIO_WAIT_WHILE_INTERNAL:
29
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
30
* caller does not hold a lock) are involved in the polling condition.
31
* @cond: wait while this conditional expression is true
32
+ * @unlock: whether to unlock and then lock again @ctx. This applies
33
+ * only when waiting for another AioContext from the main loop.
34
+ * Otherwise it's ignored.
35
*
36
* Wait while a condition is true. Use this to implement synchronous
37
* operations that require event loop activity.
38
@@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait;
39
* wait on conditions between two IOThreads since that could lead to deadlock,
40
* go via the main loop instead.
41
*/
42
-#define AIO_WAIT_WHILE(ctx, cond) ({ \
43
+#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \
44
bool waited_ = false; \
45
AioWait *wait_ = &global_aio_wait; \
46
AioContext *ctx_ = (ctx); \
47
@@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait;
48
assert(qemu_get_current_aio_context() == \
49
qemu_get_aio_context()); \
50
while ((cond)) { \
51
- if (ctx_) { \
52
+ if (unlock && ctx_) { \
53
aio_context_release(ctx_); \
54
} \
55
aio_poll(qemu_get_aio_context(), true); \
56
- if (ctx_) { \
57
+ if (unlock && ctx_) { \
58
aio_context_acquire(ctx_); \
59
} \
60
waited_ = true; \
61
@@ -XXX,XX +XXX,XX @@ extern AioWait global_aio_wait;
62
qatomic_dec(&wait_->num_waiters); \
63
waited_; })
64
65
+#define AIO_WAIT_WHILE(ctx, cond) \
66
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond, true)
67
+
68
+#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \
69
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond, false)
70
+
71
/**
72
* aio_wait_kick:
73
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
74
--
75
2.37.3
Deleted patch
1
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
2
1
3
With "intact" we mean that all job.h functions implicitly
4
take the lock. Therefore API callers are unmodified.
5
6
This means that:
7
- many static functions that will always be called with the job lock held
8
become _locked, and call _locked functions
9
- all public functions take the lock internally if needed, and call _locked
10
functions
11
- all public functions called internally by other functions in job.c will have a
12
_locked counterpart (sometimes public), to avoid deadlocks (job lock already taken).
13
These functions are not used for now.
14
- some public functions called only from external files (not job.c) do not
15
have a _locked() counterpart and take the lock inside. Others won't need
16
the lock at all because they use fields that are only set at initialization and
17
never modified.
18
19
job_{lock/unlock} is independent from real_job_{lock/unlock}.
20
21
Note: at this stage, job_{lock/unlock} and job lock guard macros
22
are *nop*
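
The resulting wrapper pattern, abridged from the diff below:

    void job_pause(Job *job)
    {
        JOB_LOCK_GUARD();   /* still a nop at this stage */
        job_pause_locked(job);
    }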
23
24
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
25
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
26
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
27
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
28
Message-Id: <20220926093214.506243-6-eesposit@redhat.com>
29
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
30
---
31
include/qemu/job.h | 138 +++++++++-
32
job.c | 610 ++++++++++++++++++++++++++++++++-------------
33
2 files changed, 561 insertions(+), 187 deletions(-)
34
35
diff --git a/include/qemu/job.h b/include/qemu/job.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/include/qemu/job.h
38
+++ b/include/qemu/job.h
39
@@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void);
40
*/
41
void job_txn_unref(JobTxn *txn);
42
43
+/*
44
+ * Same as job_txn_unref(), but called with job lock held.
45
+ * Might release the lock temporarily.
46
+ */
47
+void job_txn_unref_locked(JobTxn *txn);
48
+
49
/**
50
* Create a new long-running job and return it.
51
+ * Called with job_mutex *not* held.
52
*
53
* @job_id: The id of the newly-created job, or %NULL for internal jobs
54
* @driver: The class object for the newly-created job.
55
@@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
56
*/
57
void job_ref(Job *job);
58
59
+/* Same as job_ref(), but called with job lock held. */
60
+void job_ref_locked(Job *job);
61
+
62
/**
63
* Release a reference that was previously acquired with job_ref() or
64
* job_create(). If it's the last reference to the object, it will be freed.
65
*/
66
void job_unref(Job *job);
67
68
+/* Same as job_unref(), but called with job lock held. */
69
+void job_unref_locked(Job *job);
70
+
71
/**
72
* @job: The job that has made progress
73
* @done: How much progress the job made since the last call
74
*
75
* Updates the progress counter of the job.
76
+ *
77
+ * May be called with mutex held or not held.
78
*/
79
void job_progress_update(Job *job, uint64_t done);
80
81
@@ -XXX,XX +XXX,XX @@ void job_progress_update(Job *job, uint64_t done);
82
*
83
* Sets the expected end value of the progress counter of a job so that a
84
* completion percentage can be calculated when the progress is updated.
85
+ *
86
+ * May be called with mutex held or not held.
87
*/
88
void job_progress_set_remaining(Job *job, uint64_t remaining);
89
90
@@ -XXX,XX +XXX,XX @@ void job_progress_set_remaining(Job *job, uint64_t remaining);
91
* length before, and job_progress_update() afterwards.
92
* (So the operation acts as a parenthesis in regards to the main job
93
* operation running in background.)
94
+ *
95
+ * May be called with mutex held or not held.
96
*/
97
void job_progress_increase_remaining(Job *job, uint64_t delta);
98
99
@@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta);
100
*/
101
void job_enter_cond(Job *job, bool(*fn)(Job *job));
102
103
+/*
104
+ * Same as job_enter_cond(), but called with job lock held.
105
+ * Might release the lock temporarily.
106
+ */
107
+void job_enter_cond_locked(Job *job, bool(*fn)(Job *job));
108
+
109
/**
110
* @job: A job that has not yet been started.
111
*
112
* Begins execution of a job.
113
* Takes ownership of one reference to the job object.
114
+ *
115
+ * Called with job_mutex *not* held.
116
*/
117
void job_start(Job *job);
118
119
@@ -XXX,XX +XXX,XX @@ void job_start(Job *job);
120
* @job: The job to enter.
121
*
122
* Continue the specified job by entering the coroutine.
123
+ * Called with job_mutex *not* held.
124
*/
125
void job_enter(Job *job);
126
127
@@ -XXX,XX +XXX,XX @@ void job_enter(Job *job);
128
*
129
* Pause now if job_pause() has been called. Jobs that perform lots of I/O
130
* must call this between requests so that the job can be paused.
131
+ *
132
+ * Called with job_mutex *not* held.
133
*/
134
void coroutine_fn job_pause_point(Job *job);
135
136
@@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job);
137
* @job: The job that calls the function.
138
*
139
* Yield the job coroutine.
140
+ * Called with job_mutex *not* held.
141
*/
142
void coroutine_fn job_yield(Job *job);
143
144
@@ -XXX,XX +XXX,XX @@ void coroutine_fn job_yield(Job *job);
145
* Put the job to sleep (assuming that it wasn't canceled) for @ns
146
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
147
* interrupt the wait.
148
+ *
149
+ * Called with job_mutex *not* held.
150
*/
151
void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
152
153
@@ -XXX,XX +XXX,XX @@ const char *job_type_str(const Job *job);
154
/** Returns true if the job should not be visible to the management layer. */
155
bool job_is_internal(Job *job);
156
157
-/** Returns whether the job is being cancelled. */
158
+/**
159
+ * Returns whether the job is being cancelled.
160
+ * Called with job_mutex *not* held.
161
+ */
162
bool job_is_cancelled(Job *job);
163
164
+/* Same as job_is_cancelled(), but called with job lock held. */
165
+bool job_is_cancelled_locked(Job *job);
166
+
167
/**
168
* Returns whether the job is scheduled for cancellation (at an
169
* indefinite point).
170
+ * Called with job_mutex *not* held.
171
*/
172
bool job_cancel_requested(Job *job);
173
174
-/** Returns whether the job is in a completed state. */
175
+/**
176
+ * Returns whether the job is in a completed state.
177
+ * Called with job_mutex *not* held.
178
+ */
179
bool job_is_completed(Job *job);
180
181
-/** Returns whether the job is ready to be completed. */
182
+/* Same as job_is_completed(), but called with job lock held. */
183
+bool job_is_completed_locked(Job *job);
184
+
185
+/**
186
+ * Returns whether the job is ready to be completed.
187
+ * Called with job_mutex *not* held.
188
+ */
189
bool job_is_ready(Job *job);
190
191
+/* Same as job_is_ready(), but called with job lock held. */
192
+bool job_is_ready_locked(Job *job);
193
+
194
/**
195
* Request @job to pause at the next pause point. Must be paired with
196
* job_resume(). If the job is supposed to be resumed by user action, call
197
@@ -XXX,XX +XXX,XX @@ bool job_is_ready(Job *job);
198
*/
199
void job_pause(Job *job);
200
201
+/* Same as job_pause(), but called with job lock held. */
202
+void job_pause_locked(Job *job);
203
+
204
/** Resumes a @job paused with job_pause. */
205
void job_resume(Job *job);
206
207
+/*
208
+ * Same as job_resume(), but called with job lock held.
209
+ * Might release the lock temporarily.
210
+ */
211
+void job_resume_locked(Job *job);
212
+
213
/**
214
* Asynchronously pause the specified @job.
215
* Do not allow a resume until a matching call to job_user_resume.
216
*/
217
void job_user_pause(Job *job, Error **errp);
218
219
+/* Same as job_user_pause(), but called with job lock held. */
220
+void job_user_pause_locked(Job *job, Error **errp);
221
+
222
/** Returns true if the job is user-paused. */
223
bool job_user_paused(Job *job);
224
225
+/* Same as job_user_paused(), but called with job lock held. */
226
+bool job_user_paused_locked(Job *job);
227
+
228
/**
229
* Resume the specified @job.
230
* Must be paired with a preceding job_user_pause.
231
*/
232
void job_user_resume(Job *job, Error **errp);
233
234
+/*
235
+ * Same as job_user_resume(), but called with job lock held.
236
+ * Might release the lock temporarily.
237
+ */
238
+void job_user_resume_locked(Job *job, Error **errp);
239
+
240
/**
241
* Get the next element from the list of block jobs after @job, or the
242
* first one if @job is %NULL.
243
@@ -XXX,XX +XXX,XX @@ void job_user_resume(Job *job, Error **errp);
244
*/
245
Job *job_next(Job *job);
246
247
+/* Same as job_next(), but called with job lock held. */
248
+Job *job_next_locked(Job *job);
249
+
250
/**
251
* Get the job identified by @id (which must not be %NULL).
252
*
253
@@ -XXX,XX +XXX,XX @@ Job *job_next(Job *job);
254
*/
255
Job *job_get(const char *id);
256
257
+/* Same as job_get(), but called with job lock held. */
258
+Job *job_get_locked(const char *id);
259
+
260
/**
261
* Check whether the verb @verb can be applied to @job in its current state.
262
* Returns 0 if the verb can be applied; otherwise errp is set and -EPERM
263
@@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id);
264
*/
265
int job_apply_verb(Job *job, JobVerb verb, Error **errp);
266
267
-/** The @job could not be started, free it. */
268
+/* Same as job_apply_verb, but called with job lock held. */
269
+int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp);
270
+
271
+/**
272
+ * The @job could not be started, free it.
273
+ * Called with job_mutex *not* held.
274
+ */
275
void job_early_fail(Job *job);
276
277
-/** Moves the @job from RUNNING to READY */
278
+/**
279
+ * Moves the @job from RUNNING to READY.
280
+ * Called with job_mutex *not* held.
281
+ */
282
void job_transition_to_ready(Job *job);
283
284
/** Asynchronously complete the specified @job. */
285
void job_complete(Job *job, Error **errp);
286
287
+/*
288
+ * Same as job_complete(), but called with job lock held.
289
+ * Might release the lock temporarily.
290
+ */
291
+void job_complete_locked(Job *job, Error **errp);
292
+
293
/**
294
* Asynchronously cancel the specified @job. If @force is true, the job should
295
* be cancelled immediately without waiting for a consistent state.
296
*/
297
void job_cancel(Job *job, bool force);
298
299
+/* Same as job_cancel(), but called with job lock held. */
300
+void job_cancel_locked(Job *job, bool force);
301
+
302
/**
303
* Cancels the specified job like job_cancel(), but may refuse to do so if the
304
* operation isn't meaningful in the current state of the job.
305
*/
306
void job_user_cancel(Job *job, bool force, Error **errp);
307
308
+/* Same as job_user_cancel(), but called with job lock held. */
309
+void job_user_cancel_locked(Job *job, bool force, Error **errp);
310
+
311
/**
312
* Synchronously cancel the @job. The completion callback is called
313
* before the function returns. If @force is false, the job may
314
@@ -XXX,XX +XXX,XX @@ void job_user_cancel(Job *job, bool force, Error **errp);
315
*/
316
int job_cancel_sync(Job *job, bool force);
317
318
-/** Synchronously force-cancels all jobs using job_cancel_sync(). */
319
+/* Same as job_cancel_sync, but called with job lock held. */
320
+int job_cancel_sync_locked(Job *job, bool force);
321
+
322
+/**
323
+ * Synchronously force-cancels all jobs using job_cancel_sync_locked().
324
+ *
325
+ * Called with job_lock *not* held.
326
+ */
327
void job_cancel_sync_all(void);
328
329
/**
330
@@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void);
331
*/
332
int job_complete_sync(Job *job, Error **errp);
333
334
+/* Same as job_complete_sync, but called with job lock held. */
335
+int job_complete_sync_locked(Job *job, Error **errp);
336
+
337
/**
338
* For a @job that has finished its work and is pending awaiting explicit
339
* acknowledgement to commit its work, this will commit that work.
340
@@ -XXX,XX +XXX,XX @@ int job_complete_sync(Job *job, Error **errp);
341
*/
342
void job_finalize(Job *job, Error **errp);
343
344
+/* Same as job_finalize(), but called with job lock held. */
345
+void job_finalize_locked(Job *job, Error **errp);
346
+
347
/**
348
* Remove the concluded @job from the query list and resets the passed pointer
349
* to %NULL. Returns an error if the job is not actually concluded.
350
*/
351
void job_dismiss(Job **job, Error **errp);
352
353
+/* Same as job_dismiss(), but called with job lock held. */
354
+void job_dismiss_locked(Job **job, Error **errp);
355
+
356
/**
357
* Synchronously finishes the given @job. If @finish is given, it is called to
358
* trigger completion or cancellation of the job.
359
@@ -XXX,XX +XXX,XX @@ void job_dismiss(Job **job, Error **errp);
360
*
361
* Callers must hold the AioContext lock of job->aio_context.
362
*/
363
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
364
+int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp),
365
+ Error **errp);
366
+
367
+/*
368
+ * Same as job_finish_sync(), but called with job lock held.
369
+ * Might release the lock temporarily.
370
+ */
371
+int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
372
+ Error **errp);
373
374
#endif
375
diff --git a/job.c b/job.c
376
index XXXXXXX..XXXXXXX 100644
377
--- a/job.c
378
+++ b/job.c
379
@@ -XXX,XX +XXX,XX @@
380
*/
381
QemuMutex job_mutex;
382
383
+/* Protected by job_mutex */
384
static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
385
386
/* Job State Transition Table */
387
@@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void)
388
return txn;
389
}
390
391
-static void job_txn_ref(JobTxn *txn)
392
+/* Called with job_mutex held. */
393
+static void job_txn_ref_locked(JobTxn *txn)
394
{
395
txn->refcnt++;
396
}
397
398
-void job_txn_unref(JobTxn *txn)
399
+void job_txn_unref_locked(JobTxn *txn)
400
{
401
if (txn && --txn->refcnt == 0) {
402
g_free(txn);
403
}
404
}
405
406
+void job_txn_unref(JobTxn *txn)
407
+{
408
+ JOB_LOCK_GUARD();
409
+ job_txn_unref_locked(txn);
410
+}
411
+
412
/**
413
* @txn: The transaction (may be NULL)
414
* @job: Job to add to the transaction
415
@@ -XXX,XX +XXX,XX @@ void job_txn_unref(JobTxn *txn)
416
* the reference that is automatically grabbed here.
417
*
418
* If @txn is NULL, the function does nothing.
419
+ *
420
+ * Called with job_mutex held.
421
*/
422
-static void job_txn_add_job(JobTxn *txn, Job *job)
423
+static void job_txn_add_job_locked(JobTxn *txn, Job *job)
424
{
425
if (!txn) {
426
return;
427
@@ -XXX,XX +XXX,XX @@ static void job_txn_add_job(JobTxn *txn, Job *job)
428
job->txn = txn;
429
430
QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
431
- job_txn_ref(txn);
432
+ job_txn_ref_locked(txn);
433
}
434
435
-static void job_txn_del_job(Job *job)
436
+/* Called with job_mutex held. */
437
+static void job_txn_del_job_locked(Job *job)
438
{
439
if (job->txn) {
440
QLIST_REMOVE(job, txn_list);
441
- job_txn_unref(job->txn);
442
+ job_txn_unref_locked(job->txn);
443
job->txn = NULL;
444
}
445
}
446
447
-static int job_txn_apply(Job *job, int fn(Job *))
448
+/* Called with job_mutex held, but releases it temporarily. */
449
+static int job_txn_apply_locked(Job *job, int fn(Job *))
450
{
451
AioContext *inner_ctx;
452
Job *other_job, *next;
453
@@ -XXX,XX +XXX,XX @@ static int job_txn_apply(Job *job, int fn(Job *))
454
* we need to release it here to avoid holding the lock twice - which would
455
* break AIO_WAIT_WHILE from within fn.
456
*/
457
- job_ref(job);
458
+ job_ref_locked(job);
459
aio_context_release(job->aio_context);
460
461
QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
462
@@ -XXX,XX +XXX,XX @@ static int job_txn_apply(Job *job, int fn(Job *))
463
* can't use a local variable to cache it.
464
*/
465
aio_context_acquire(job->aio_context);
466
- job_unref(job);
467
+ job_unref_locked(job);
468
return rc;
469
}
470
471
@@ -XXX,XX +XXX,XX @@ bool job_is_internal(Job *job)
472
return (job->id == NULL);
473
}
474
475
-static void job_state_transition(Job *job, JobStatus s1)
476
+/* Called with job_mutex held. */
477
+static void job_state_transition_locked(Job *job, JobStatus s1)
478
{
479
JobStatus s0 = job->status;
480
assert(s1 >= 0 && s1 < JOB_STATUS__MAX);
481
@@ -XXX,XX +XXX,XX @@ static void job_state_transition(Job *job, JobStatus s1)
482
}
483
}
484
485
-int job_apply_verb(Job *job, JobVerb verb, Error **errp)
486
+int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp)
487
{
488
JobStatus s0 = job->status;
489
assert(verb >= 0 && verb < JOB_VERB__MAX);
490
@@ -XXX,XX +XXX,XX @@ int job_apply_verb(Job *job, JobVerb verb, Error **errp)
491
return -EPERM;
492
}
493
494
+int job_apply_verb(Job *job, JobVerb verb, Error **errp)
495
+{
496
+ JOB_LOCK_GUARD();
497
+ return job_apply_verb_locked(job, verb, errp);
498
+}
499
+
500
JobType job_type(const Job *job)
501
{
502
return job->driver->job_type;
503
@@ -XXX,XX +XXX,XX @@ const char *job_type_str(const Job *job)
504
return JobType_str(job_type(job));
505
}
506
507
-bool job_is_cancelled(Job *job)
508
+bool job_is_cancelled_locked(Job *job)
509
{
510
/* force_cancel may be true only if cancelled is true, too */
511
assert(job->cancelled || !job->force_cancel);
512
return job->force_cancel;
513
}
514
515
-bool job_cancel_requested(Job *job)
516
+bool job_is_cancelled(Job *job)
517
+{
518
+ JOB_LOCK_GUARD();
519
+ return job_is_cancelled_locked(job);
520
+}
521
+
522
+/* Called with job_mutex held. */
523
+static bool job_cancel_requested_locked(Job *job)
524
{
525
return job->cancelled;
526
}
527
528
-bool job_is_ready(Job *job)
529
+bool job_cancel_requested(Job *job)
530
+{
531
+ JOB_LOCK_GUARD();
532
+ return job_cancel_requested_locked(job);
533
+}
534
+
535
+bool job_is_ready_locked(Job *job)
536
{
537
switch (job->status) {
538
case JOB_STATUS_UNDEFINED:
539
@@ -XXX,XX +XXX,XX @@ bool job_is_ready(Job *job)
540
return false;
541
}
542
543
-bool job_is_completed(Job *job)
544
+bool job_is_ready(Job *job)
545
+{
546
+ JOB_LOCK_GUARD();
547
+ return job_is_ready_locked(job);
548
+}
549
+
550
+bool job_is_completed_locked(Job *job)
551
{
552
switch (job->status) {
553
case JOB_STATUS_UNDEFINED:
554
@@ -XXX,XX +XXX,XX @@ bool job_is_completed(Job *job)
555
return false;
556
}
557
558
-static bool job_started(Job *job)
559
+bool job_is_completed(Job *job)
560
+{
561
+ JOB_LOCK_GUARD();
562
+ return job_is_completed_locked(job);
563
+}
564
+
565
+static bool job_started_locked(Job *job)
566
{
567
return job->co;
568
}
569
570
-static bool job_should_pause(Job *job)
571
+/* Called with job_mutex held. */
572
+static bool job_should_pause_locked(Job *job)
573
{
574
return job->pause_count > 0;
575
}
576
577
-Job *job_next(Job *job)
578
+Job *job_next_locked(Job *job)
579
{
580
if (!job) {
581
return QLIST_FIRST(&jobs);
582
@@ -XXX,XX +XXX,XX @@ Job *job_next(Job *job)
583
return QLIST_NEXT(job, job_list);
584
}
585
586
-Job *job_get(const char *id)
587
+Job *job_next(Job *job)
588
+{
589
+ JOB_LOCK_GUARD();
590
+ return job_next_locked(job);
591
+}
592
+
593
+Job *job_get_locked(const char *id)
594
{
595
Job *job;
596
597
@@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id)
598
return NULL;
599
}
600
601
+Job *job_get(const char *id)
602
+{
603
+ JOB_LOCK_GUARD();
604
+ return job_get_locked(id);
605
+}
606
+
607
+/* Called with job_mutex *not* held. */
608
static void job_sleep_timer_cb(void *opaque)
609
{
610
Job *job = opaque;
611
@@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
612
{
613
Job *job;
614
615
+ JOB_LOCK_GUARD();
616
+
617
if (job_id) {
618
if (flags & JOB_INTERNAL) {
619
error_setg(errp, "Cannot specify job ID for internal job");
620
@@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
621
error_setg(errp, "Invalid job ID '%s'", job_id);
622
return NULL;
623
}
624
- if (job_get(job_id)) {
625
+ if (job_get_locked(job_id)) {
626
error_setg(errp, "Job ID '%s' already in use", job_id);
627
return NULL;
628
}
629
@@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
630
notifier_list_init(&job->on_ready);
631
notifier_list_init(&job->on_idle);
632
633
- job_state_transition(job, JOB_STATUS_CREATED);
634
+ job_state_transition_locked(job, JOB_STATUS_CREATED);
635
aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
636
QEMU_CLOCK_REALTIME, SCALE_NS,
637
job_sleep_timer_cb, job);
638
@@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
639
* consolidating the job management logic */
640
if (!txn) {
641
txn = job_txn_new();
642
- job_txn_add_job(txn, job);
643
- job_txn_unref(txn);
644
+ job_txn_add_job_locked(txn, job);
645
+ job_txn_unref_locked(txn);
646
} else {
647
- job_txn_add_job(txn, job);
648
+ job_txn_add_job_locked(txn, job);
649
}
650
651
return job;
652
}
653
654
-void job_ref(Job *job)
655
+void job_ref_locked(Job *job)
656
{
657
++job->refcnt;
658
}
659
660
-void job_unref(Job *job)
661
+void job_ref(Job *job)
662
+{
663
+ JOB_LOCK_GUARD();
664
+ job_ref_locked(job);
665
+}
666
+
667
+void job_unref_locked(Job *job)
668
{
669
GLOBAL_STATE_CODE();
670
671
@@ -XXX,XX +XXX,XX @@ void job_unref(Job *job)
672
assert(!job->txn);
673
674
if (job->driver->free) {
675
+ job_unlock();
676
job->driver->free(job);
677
+ job_lock();
678
}
679
680
QLIST_REMOVE(job, job_list);
681
@@ -XXX,XX +XXX,XX @@ void job_unref(Job *job)
682
}
683
}
684
685
+void job_unref(Job *job)
686
+{
687
+ JOB_LOCK_GUARD();
688
+ job_unref_locked(job);
689
+}
690
+
691
void job_progress_update(Job *job, uint64_t done)
692
{
693
progress_work_done(&job->progress, done);
694
@@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta)
695
696
/**
697
* To be called when a cancelled job is finalised.
698
+ * Called with job_mutex held.
699
*/
700
-static void job_event_cancelled(Job *job)
701
+static void job_event_cancelled_locked(Job *job)
702
{
703
notifier_list_notify(&job->on_finalize_cancelled, job);
704
}
705
706
/**
707
* To be called when a successfully completed job is finalised.
708
+ * Called with job_mutex held.
709
*/
710
-static void job_event_completed(Job *job)
711
+static void job_event_completed_locked(Job *job)
712
{
713
notifier_list_notify(&job->on_finalize_completed, job);
714
}
715
716
-static void job_event_pending(Job *job)
717
+/* Called with job_mutex held. */
718
+static void job_event_pending_locked(Job *job)
719
{
720
notifier_list_notify(&job->on_pending, job);
721
}
722
723
-static void job_event_ready(Job *job)
724
+/* Called with job_mutex held. */
725
+static void job_event_ready_locked(Job *job)
726
{
727
notifier_list_notify(&job->on_ready, job);
728
}
729
730
-static void job_event_idle(Job *job)
731
+/* Called with job_mutex held. */
732
+static void job_event_idle_locked(Job *job)
733
{
734
notifier_list_notify(&job->on_idle, job);
735
}
736
737
-void job_enter_cond(Job *job, bool(*fn)(Job *job))
738
+void job_enter_cond_locked(Job *job, bool(*fn)(Job *job))
739
{
740
- if (!job_started(job)) {
741
+ if (!job_started_locked(job)) {
742
return;
743
}
744
if (job->deferred_to_main_loop) {
745
@@ -XXX,XX +XXX,XX @@ void job_enter_cond(Job *job, bool(*fn)(Job *job))
746
timer_del(&job->sleep_timer);
747
job->busy = true;
748
real_job_unlock();
749
+ job_unlock();
750
aio_co_enter(job->aio_context, job->co);
751
+ job_lock();
752
+}
753
+
754
+void job_enter_cond(Job *job, bool(*fn)(Job *job))
755
+{
756
+ JOB_LOCK_GUARD();
757
+ job_enter_cond_locked(job, fn);
758
}
759
760
void job_enter(Job *job)
761
{
762
- job_enter_cond(job, NULL);
763
+ JOB_LOCK_GUARD();
764
+ job_enter_cond_locked(job, NULL);
765
}
766
767
/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
768
@@ -XXX,XX +XXX,XX @@ void job_enter(Job *job)
769
* is allowed and cancels the timer.
770
*
771
* If @ns is (uint64_t) -1, no timer is scheduled and job_enter() must be
772
- * called explicitly. */
773
-static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
774
+ * called explicitly.
775
+ *
776
+ * Called with job_mutex held, but releases it temporarily.
777
+ */
778
+static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns)
779
{
780
real_job_lock();
781
if (ns != -1) {
782
timer_mod(&job->sleep_timer, ns);
783
}
784
job->busy = false;
785
- job_event_idle(job);
786
+ job_event_idle_locked(job);
787
real_job_unlock();
788
+ job_unlock();
789
qemu_coroutine_yield();
790
+ job_lock();
791
792
/* Set by job_enter_cond() before re-entering the coroutine. */
793
assert(job->busy);
794
}
795
796
-void coroutine_fn job_pause_point(Job *job)
797
+/* Called with job_mutex held, but releases it temporarily. */
798
+static void coroutine_fn job_pause_point_locked(Job *job)
799
{
800
- assert(job && job_started(job));
801
+ assert(job && job_started_locked(job));
802
803
- if (!job_should_pause(job)) {
804
+ if (!job_should_pause_locked(job)) {
805
return;
806
}
807
- if (job_is_cancelled(job)) {
808
+ if (job_is_cancelled_locked(job)) {
809
return;
810
}
811
812
if (job->driver->pause) {
813
+ job_unlock();
814
job->driver->pause(job);
815
+ job_lock();
816
}
817
818
- if (job_should_pause(job) && !job_is_cancelled(job)) {
819
+ if (job_should_pause_locked(job) && !job_is_cancelled_locked(job)) {
820
JobStatus status = job->status;
821
- job_state_transition(job, status == JOB_STATUS_READY
822
- ? JOB_STATUS_STANDBY
823
- : JOB_STATUS_PAUSED);
824
+ job_state_transition_locked(job, status == JOB_STATUS_READY
825
+ ? JOB_STATUS_STANDBY
826
+ : JOB_STATUS_PAUSED);
827
job->paused = true;
828
- job_do_yield(job, -1);
829
+ job_do_yield_locked(job, -1);
830
job->paused = false;
831
- job_state_transition(job, status);
832
+ job_state_transition_locked(job, status);
833
}
834
835
if (job->driver->resume) {
836
+ job_unlock();
837
job->driver->resume(job);
838
+ job_lock();
839
}
840
}
841
842
-void coroutine_fn job_yield(Job *job)
843
+void coroutine_fn job_pause_point(Job *job)
844
+{
845
+ JOB_LOCK_GUARD();
846
+ job_pause_point_locked(job);
847
+}
848
+
849
+static void coroutine_fn job_yield_locked(Job *job)
850
{
851
assert(job->busy);
852
853
/* Check cancellation *before* setting busy = false, too! */
854
- if (job_is_cancelled(job)) {
855
+ if (job_is_cancelled_locked(job)) {
856
return;
857
}
858
859
- if (!job_should_pause(job)) {
860
- job_do_yield(job, -1);
861
+ if (!job_should_pause_locked(job)) {
862
+ job_do_yield_locked(job, -1);
863
}
864
865
- job_pause_point(job);
866
+ job_pause_point_locked(job);
867
+}
868
+
869
+void coroutine_fn job_yield(Job *job)
870
+{
871
+ JOB_LOCK_GUARD();
872
+ job_yield_locked(job);
873
}
874
875
void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
876
{
877
+ JOB_LOCK_GUARD();
878
assert(job->busy);
879
880
/* Check cancellation *before* setting busy = false, too! */
881
- if (job_is_cancelled(job)) {
882
+ if (job_is_cancelled_locked(job)) {
883
return;
884
}
885
886
- if (!job_should_pause(job)) {
887
- job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
888
+ if (!job_should_pause_locked(job)) {
889
+ job_do_yield_locked(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
890
}
891
892
- job_pause_point(job);
893
+ job_pause_point_locked(job);
894
}
895
896
-/* Assumes the block_job_mutex is held */
897
-static bool job_timer_not_pending(Job *job)
898
+/* Assumes the job_mutex is held */
899
+static bool job_timer_not_pending_locked(Job *job)
900
{
901
return !timer_pending(&job->sleep_timer);
902
}
903
904
-void job_pause(Job *job)
905
+void job_pause_locked(Job *job)
906
{
907
job->pause_count++;
908
if (!job->paused) {
909
- job_enter(job);
910
+ job_enter_cond_locked(job, NULL);
911
}
912
}
913
914
-void job_resume(Job *job)
915
+void job_pause(Job *job)
916
+{
917
+ JOB_LOCK_GUARD();
918
+ job_pause_locked(job);
919
+}
920
+
921
+void job_resume_locked(Job *job)
922
{
923
assert(job->pause_count > 0);
924
job->pause_count--;
925
@@ -XXX,XX +XXX,XX @@ void job_resume(Job *job)
926
}
927
928
/* kick only if no timer is pending */
929
- job_enter_cond(job, job_timer_not_pending);
930
+ job_enter_cond_locked(job, job_timer_not_pending_locked);
931
}
932
933
-void job_user_pause(Job *job, Error **errp)
934
+void job_resume(Job *job)
935
{
936
- if (job_apply_verb(job, JOB_VERB_PAUSE, errp)) {
937
+ JOB_LOCK_GUARD();
938
+ job_resume_locked(job);
939
+}
940
+
941
+void job_user_pause_locked(Job *job, Error **errp)
942
+{
943
+ if (job_apply_verb_locked(job, JOB_VERB_PAUSE, errp)) {
944
return;
945
}
946
if (job->user_paused) {
947
@@ -XXX,XX +XXX,XX @@ void job_user_pause(Job *job, Error **errp)
948
return;
949
}
950
job->user_paused = true;
951
- job_pause(job);
952
+ job_pause_locked(job);
953
}
954
955
-bool job_user_paused(Job *job)
956
+void job_user_pause(Job *job, Error **errp)
957
+{
958
+ JOB_LOCK_GUARD();
959
+ job_user_pause_locked(job, errp);
960
+}
961
+
962
+bool job_user_paused_locked(Job *job)
963
{
964
return job->user_paused;
965
}
966
967
-void job_user_resume(Job *job, Error **errp)
968
+bool job_user_paused(Job *job)
969
+{
970
+ JOB_LOCK_GUARD();
971
+ return job_user_paused_locked(job);
972
+}
973
+
974
+void job_user_resume_locked(Job *job, Error **errp)
975
{
976
assert(job);
977
GLOBAL_STATE_CODE();
978
@@ -XXX,XX +XXX,XX @@ void job_user_resume(Job *job, Error **errp)
979
error_setg(errp, "Can't resume a job that was not paused");
980
return;
981
}
982
- if (job_apply_verb(job, JOB_VERB_RESUME, errp)) {
983
+ if (job_apply_verb_locked(job, JOB_VERB_RESUME, errp)) {
984
return;
985
}
986
if (job->driver->user_resume) {
987
+ job_unlock();
988
job->driver->user_resume(job);
989
+ job_lock();
990
}
991
job->user_paused = false;
992
- job_resume(job);
993
+ job_resume_locked(job);
994
}
995
996
-static void job_do_dismiss(Job *job)
997
+void job_user_resume(Job *job, Error **errp)
998
+{
999
+ JOB_LOCK_GUARD();
1000
+ job_user_resume_locked(job, errp);
1001
+}
1002
+
1003
+/* Called with job_mutex held, but releases it temporarily. */
1004
+static void job_do_dismiss_locked(Job *job)
1005
{
1006
assert(job);
1007
job->busy = false;
1008
job->paused = false;
1009
job->deferred_to_main_loop = true;
1010
1011
- job_txn_del_job(job);
1012
+ job_txn_del_job_locked(job);
1013
1014
- job_state_transition(job, JOB_STATUS_NULL);
1015
- job_unref(job);
1016
+ job_state_transition_locked(job, JOB_STATUS_NULL);
1017
+ job_unref_locked(job);
1018
}
1019
1020
-void job_dismiss(Job **jobptr, Error **errp)
1021
+void job_dismiss_locked(Job **jobptr, Error **errp)
1022
{
1023
Job *job = *jobptr;
1024
/* similarly to _complete, this is QMP-interface only. */
1025
assert(job->id);
1026
- if (job_apply_verb(job, JOB_VERB_DISMISS, errp)) {
1027
+ if (job_apply_verb_locked(job, JOB_VERB_DISMISS, errp)) {
1028
return;
1029
}
1030
1031
- job_do_dismiss(job);
1032
+ job_do_dismiss_locked(job);
1033
*jobptr = NULL;
1034
}
1035
1036
+void job_dismiss(Job **jobptr, Error **errp)
1037
+{
1038
+ JOB_LOCK_GUARD();
1039
+ job_dismiss_locked(jobptr, errp);
1040
+}
1041
+
1042
void job_early_fail(Job *job)
1043
{
1044
+ JOB_LOCK_GUARD();
1045
assert(job->status == JOB_STATUS_CREATED);
1046
- job_do_dismiss(job);
1047
+ job_do_dismiss_locked(job);
1048
}
1049
1050
-static void job_conclude(Job *job)
1051
+/* Called with job_mutex held. */
1052
+static void job_conclude_locked(Job *job)
1053
{
1054
- job_state_transition(job, JOB_STATUS_CONCLUDED);
1055
- if (job->auto_dismiss || !job_started(job)) {
1056
- job_do_dismiss(job);
1057
+ job_state_transition_locked(job, JOB_STATUS_CONCLUDED);
1058
+ if (job->auto_dismiss || !job_started_locked(job)) {
1059
+ job_do_dismiss_locked(job);
1060
}
1061
}
1062
1063
-static void job_update_rc(Job *job)
1064
+/* Called with job_mutex held. */
1065
+static void job_update_rc_locked(Job *job)
1066
{
1067
- if (!job->ret && job_is_cancelled(job)) {
1068
+ if (!job->ret && job_is_cancelled_locked(job)) {
1069
job->ret = -ECANCELED;
1070
}
1071
if (job->ret) {
1072
if (!job->err) {
1073
error_setg(&job->err, "%s", strerror(-job->ret));
1074
}
1075
- job_state_transition(job, JOB_STATUS_ABORTING);
1076
+ job_state_transition_locked(job, JOB_STATUS_ABORTING);
1077
}
1078
}
1079
1080
@@ -XXX,XX +XXX,XX @@ static void job_clean(Job *job)
1081
}
1082
}
1083
1084
-static int job_finalize_single(Job *job)
1085
+/* Called with job_mutex held, but releases it temporarily */
1086
+static int job_finalize_single_locked(Job *job)
1087
{
1088
- assert(job_is_completed(job));
1089
+ int job_ret;
1090
+
1091
+ assert(job_is_completed_locked(job));
1092
1093
/* Ensure abort is called for late-transactional failures */
1094
- job_update_rc(job);
1095
+ job_update_rc_locked(job);
1096
+
1097
+ job_ret = job->ret;
1098
+ job_unlock();
1099
1100
- if (!job->ret) {
1101
+ if (!job_ret) {
1102
job_commit(job);
1103
} else {
1104
job_abort(job);
1105
}
1106
job_clean(job);
1107
1108
+ job_lock();
1109
+
1110
if (job->cb) {
1111
- job->cb(job->opaque, job->ret);
1112
+ job_ret = job->ret;
1113
+ job_unlock();
1114
+ job->cb(job->opaque, job_ret);
1115
+ job_lock();
1116
}
1117
1118
/* Emit events only if we actually started */
1119
- if (job_started(job)) {
1120
- if (job_is_cancelled(job)) {
1121
- job_event_cancelled(job);
1122
+ if (job_started_locked(job)) {
1123
+ if (job_is_cancelled_locked(job)) {
1124
+ job_event_cancelled_locked(job);
1125
} else {
1126
- job_event_completed(job);
1127
+ job_event_completed_locked(job);
1128
}
1129
}
1130
1131
- job_txn_del_job(job);
1132
- job_conclude(job);
1133
+ job_txn_del_job_locked(job);
1134
+ job_conclude_locked(job);
1135
return 0;
1136
}
1137
1138
-static void job_cancel_async(Job *job, bool force)
1139
+/* Called with job_mutex held, but releases it temporarily */
1140
+static void job_cancel_async_locked(Job *job, bool force)
1141
{
1142
GLOBAL_STATE_CODE();
1143
if (job->driver->cancel) {
1144
+ job_unlock();
1145
force = job->driver->cancel(job, force);
1146
+ job_lock();
1147
} else {
1148
/* No .cancel() means the job will behave as if force-cancelled */
1149
force = true;
1150
@@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force)
1151
if (job->user_paused) {
1152
/* Do not call job_enter here, the caller will handle it. */
1153
if (job->driver->user_resume) {
1154
+ job_unlock();
1155
job->driver->user_resume(job);
1156
+ job_lock();
1157
}
1158
job->user_paused = false;
1159
assert(job->pause_count > 0);
1160
@@ -XXX,XX +XXX,XX @@ static void job_cancel_async(Job *job, bool force)
1161
}
1162
}
1163
1164
-static void job_completed_txn_abort(Job *job)
1165
+/* Called with job_mutex held, but releases it temporarily. */
1166
+static void job_completed_txn_abort_locked(Job *job)
1167
{
1168
AioContext *ctx;
1169
JobTxn *txn = job->txn;
1170
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
1171
return;
1172
}
1173
     txn->aborting = true;
-    job_txn_ref(txn);
+    job_txn_ref_locked(txn);

     /*
      * We can only hold the single job's AioContext lock while calling
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
      * calls of AIO_WAIT_WHILE(), which could deadlock otherwise.
      * Note that the job's AioContext may change when it is finalized.
      */
-    job_ref(job);
+    job_ref_locked(job);
     aio_context_release(job->aio_context);

     /* Other jobs are effectively cancelled by us, set the status for
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
              * Therefore, pass force=true to terminate all other jobs as quickly
              * as possible.
              */
-            job_cancel_async(other_job, true);
+            job_cancel_async_locked(other_job, true);
             aio_context_release(ctx);
         }
     }

@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
         */
        ctx = other_job->aio_context;
        aio_context_acquire(ctx);
-       if (!job_is_completed(other_job)) {
-           assert(job_cancel_requested(other_job));
-           job_finish_sync(other_job, NULL, NULL);
+       if (!job_is_completed_locked(other_job)) {
+           assert(job_cancel_requested_locked(other_job));
+           job_finish_sync_locked(other_job, NULL, NULL);
        }
-       job_finalize_single(other_job);
+       job_finalize_single_locked(other_job);
        aio_context_release(ctx);
     }

@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort(Job *job)
      * even if the job went away during job_finalize_single().
      */
     aio_context_acquire(job->aio_context);
-    job_unref(job);
+    job_unref_locked(job);

-    job_txn_unref(txn);
+    job_txn_unref_locked(txn);
 }

-static int job_prepare(Job *job)
+/* Called with job_mutex held, but releases it temporarily */
+static int job_prepare_locked(Job *job)
 {
+    int ret;
+
     GLOBAL_STATE_CODE();
     if (job->ret == 0 && job->driver->prepare) {
-        job->ret = job->driver->prepare(job);
-        job_update_rc(job);
+        job_unlock();
+        ret = job->driver->prepare(job);
+        job_lock();
+        job->ret = ret;
+        job_update_rc_locked(job);
     }
     return job->ret;
 }

-static int job_needs_finalize(Job *job)
+/* Called with job_mutex held */
+static int job_needs_finalize_locked(Job *job)
 {
     return !job->auto_finalize;
 }

-static void job_do_finalize(Job *job)
+/* Called with job_mutex held */
+static void job_do_finalize_locked(Job *job)
 {
     int rc;
     assert(job && job->txn);

     /* prepare the transaction to complete */
-    rc = job_txn_apply(job, job_prepare);
+    rc = job_txn_apply_locked(job, job_prepare_locked);
     if (rc) {
-        job_completed_txn_abort(job);
+        job_completed_txn_abort_locked(job);
     } else {
-        job_txn_apply(job, job_finalize_single);
+        job_txn_apply_locked(job, job_finalize_single_locked);
     }
 }

-void job_finalize(Job *job, Error **errp)
+void job_finalize_locked(Job *job, Error **errp)
 {
     assert(job && job->id);
-    if (job_apply_verb(job, JOB_VERB_FINALIZE, errp)) {
+    if (job_apply_verb_locked(job, JOB_VERB_FINALIZE, errp)) {
         return;
     }
-    job_do_finalize(job);
+    job_do_finalize_locked(job);
 }

-static int job_transition_to_pending(Job *job)
+void job_finalize(Job *job, Error **errp)
 {
-    job_state_transition(job, JOB_STATUS_PENDING);
+    JOB_LOCK_GUARD();
+    job_finalize_locked(job, errp);
+}
+
+/* Called with job_mutex held. */
+static int job_transition_to_pending_locked(Job *job)
+{
+    job_state_transition_locked(job, JOB_STATUS_PENDING);
     if (!job->auto_finalize) {
-        job_event_pending(job);
+        job_event_pending_locked(job);
     }
     return 0;
 }

 void job_transition_to_ready(Job *job)
 {
-    job_state_transition(job, JOB_STATUS_READY);
-    job_event_ready(job);
+    JOB_LOCK_GUARD();
+    job_state_transition_locked(job, JOB_STATUS_READY);
+    job_event_ready_locked(job);
 }

-static void job_completed_txn_success(Job *job)
+/* Called with job_mutex held. */
+static void job_completed_txn_success_locked(Job *job)
 {
     JobTxn *txn = job->txn;
     Job *other_job;

-    job_state_transition(job, JOB_STATUS_WAITING);
+    job_state_transition_locked(job, JOB_STATUS_WAITING);

     /*
      * Successful completion, see if there are other running jobs in this
      * txn.
      */
     QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
-        if (!job_is_completed(other_job)) {
+        if (!job_is_completed_locked(other_job)) {
             return;
         }
         assert(other_job->ret == 0);
     }

-    job_txn_apply(job, job_transition_to_pending);
+    job_txn_apply_locked(job, job_transition_to_pending_locked);

     /* If no jobs need manual finalization, automatically do so */
-    if (job_txn_apply(job, job_needs_finalize) == 0) {
-        job_do_finalize(job);
+    if (job_txn_apply_locked(job, job_needs_finalize_locked) == 0) {
+        job_do_finalize_locked(job);
     }
 }

-static void job_completed(Job *job)
+/* Called with job_mutex held. */
+static void job_completed_locked(Job *job)
 {
-    assert(job && job->txn && !job_is_completed(job));
+    assert(job && job->txn && !job_is_completed_locked(job));

-    job_update_rc(job);
+    job_update_rc_locked(job);
     trace_job_completed(job, job->ret);
     if (job->ret) {
-        job_completed_txn_abort(job);
+        job_completed_txn_abort_locked(job);
     } else {
-        job_completed_txn_success(job);
+        job_completed_txn_success_locked(job);
     }
 }

-/** Useful only as a type shim for aio_bh_schedule_oneshot. */
+/**
+ * Useful only as a type shim for aio_bh_schedule_oneshot.
+ * Called with job_mutex *not* held.
+ */
 static void job_exit(void *opaque)
 {
     Job *job = (Job *)opaque;
     AioContext *ctx;
+    JOB_LOCK_GUARD();

-    job_ref(job);
+    job_ref_locked(job);
     aio_context_acquire(job->aio_context);

     /* This is a lie, we're not quiescent, but still doing the completion
@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
      * drain block nodes, and if .drained_poll still returned true, we would
      * deadlock. */
     job->busy = false;
-    job_event_idle(job);
+    job_event_idle_locked(job);

-    job_completed(job);
+    job_completed_locked(job);

     /*
      * Note that calling job_completed can move the job to a different
@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
      * the job underneath us.
      */
     ctx = job->aio_context;
-    job_unref(job);
+    job_unref_locked(job);
     aio_context_release(ctx);
 }

@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
 static void coroutine_fn job_co_entry(void *opaque)
 {
     Job *job = opaque;
+    int ret;

     assert(job && job->driver && job->driver->run);
-    assert(job->aio_context == qemu_get_current_aio_context());
-    job_pause_point(job);
-    job->ret = job->driver->run(job, &job->err);
-    job->deferred_to_main_loop = true;
-    job->busy = true;
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->aio_context == qemu_get_current_aio_context());
+        job_pause_point_locked(job);
+    }
+    ret = job->driver->run(job, &job->err);
+    WITH_JOB_LOCK_GUARD() {
+        job->ret = ret;
+        job->deferred_to_main_loop = true;
+        job->busy = true;
+    }
     aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
 }

 void job_start(Job *job)
 {
-    assert(job && !job_started(job) && job->paused &&
-           job->driver && job->driver->run);
-    job->co = qemu_coroutine_create(job_co_entry, job);
-    job->pause_count--;
-    job->busy = true;
-    job->paused = false;
-    job_state_transition(job, JOB_STATUS_RUNNING);
+    assert(qemu_in_main_thread());
+
+    WITH_JOB_LOCK_GUARD() {
+        assert(job && !job_started_locked(job) && job->paused &&
+               job->driver && job->driver->run);
+        job->co = qemu_coroutine_create(job_co_entry, job);
+        job->pause_count--;
+        job->busy = true;
+        job->paused = false;
+        job_state_transition_locked(job, JOB_STATUS_RUNNING);
+    }
     aio_co_enter(job->aio_context, job->co);
 }

-void job_cancel(Job *job, bool force)
+void job_cancel_locked(Job *job, bool force)
 {
     if (job->status == JOB_STATUS_CONCLUDED) {
-        job_do_dismiss(job);
+        job_do_dismiss_locked(job);
         return;
     }
-    job_cancel_async(job, force);
-    if (!job_started(job)) {
-        job_completed(job);
+    job_cancel_async_locked(job, force);
+    if (!job_started_locked(job)) {
+        job_completed_locked(job);
     } else if (job->deferred_to_main_loop) {
         /*
          * job_cancel_async() ignores soft-cancel requests for jobs
@@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force)
          * choose to call job_is_cancelled() to show that we invoke
          * job_completed_txn_abort() only for force-cancelled jobs.)
          */
-        if (job_is_cancelled(job)) {
-            job_completed_txn_abort(job);
+        if (job_is_cancelled_locked(job)) {
+            job_completed_txn_abort_locked(job);
         }
     } else {
-        job_enter(job);
+        job_enter_cond_locked(job, NULL);
     }
 }

-void job_user_cancel(Job *job, bool force, Error **errp)
+void job_cancel(Job *job, bool force)
 {
-    if (job_apply_verb(job, JOB_VERB_CANCEL, errp)) {
+    JOB_LOCK_GUARD();
+    job_cancel_locked(job, force);
+}
+
+void job_user_cancel_locked(Job *job, bool force, Error **errp)
+{
+    if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) {
         return;
     }
-    job_cancel(job, force);
+    job_cancel_locked(job, force);
+}
+
+void job_user_cancel(Job *job, bool force, Error **errp)
+{
+    JOB_LOCK_GUARD();
+    job_user_cancel_locked(job, force, errp);
 }

 /* A wrapper around job_cancel() taking an Error ** parameter so it may be
  * used with job_finish_sync() without the need for (rather nasty) function
- * pointer casts there. */
-static void job_cancel_err(Job *job, Error **errp)
+ * pointer casts there.
+ *
+ * Called with job_mutex held.
+ */
+static void job_cancel_err_locked(Job *job, Error **errp)
 {
-    job_cancel(job, false);
+    job_cancel_locked(job, false);
 }

 /**
  * Same as job_cancel_err(), but force-cancel.
+ * Called with job_mutex held.
  */
-static void job_force_cancel_err(Job *job, Error **errp)
+static void job_force_cancel_err_locked(Job *job, Error **errp)
 {
-    job_cancel(job, true);
+    job_cancel_locked(job, true);
 }

-int job_cancel_sync(Job *job, bool force)
+int job_cancel_sync_locked(Job *job, bool force)
 {
     if (force) {
-        return job_finish_sync(job, &job_force_cancel_err, NULL);
+        return job_finish_sync_locked(job, &job_force_cancel_err_locked, NULL);
     } else {
-        return job_finish_sync(job, &job_cancel_err, NULL);
+        return job_finish_sync_locked(job, &job_cancel_err_locked, NULL);
     }
 }

+int job_cancel_sync(Job *job, bool force)
+{
+    JOB_LOCK_GUARD();
+    return job_cancel_sync_locked(job, force);
+}
+
 void job_cancel_sync_all(void)
 {
     Job *job;
     AioContext *aio_context;
+    JOB_LOCK_GUARD();

-    while ((job = job_next(NULL))) {
+    while ((job = job_next_locked(NULL))) {
         aio_context = job->aio_context;
         aio_context_acquire(aio_context);
-        job_cancel_sync(job, true);
+        job_cancel_sync_locked(job, true);
         aio_context_release(aio_context);
     }
 }

+int job_complete_sync_locked(Job *job, Error **errp)
+{
+    return job_finish_sync_locked(job, job_complete_locked, errp);
+}
+
 int job_complete_sync(Job *job, Error **errp)
 {
-    return job_finish_sync(job, job_complete, errp);
+    JOB_LOCK_GUARD();
+    return job_complete_sync_locked(job, errp);
 }

-void job_complete(Job *job, Error **errp)
+void job_complete_locked(Job *job, Error **errp)
 {
     /* Should not be reachable via external interface for internal jobs */
     assert(job->id);
     GLOBAL_STATE_CODE();
-    if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
+    if (job_apply_verb_locked(job, JOB_VERB_COMPLETE, errp)) {
         return;
     }
-    if (job_cancel_requested(job) || !job->driver->complete) {
+    if (job_cancel_requested_locked(job) || !job->driver->complete) {
         error_setg(errp, "The active block job '%s' cannot be completed",
                    job->id);
         return;
     }

+    job_unlock();
     job->driver->complete(job, errp);
+    job_lock();
 }

-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
+void job_complete(Job *job, Error **errp)
+{
+    JOB_LOCK_GUARD();
+    job_complete_locked(job, errp);
+}
+
+int job_finish_sync_locked(Job *job,
+                           void (*finish)(Job *, Error **errp),
+                           Error **errp)
 {
     Error *local_err = NULL;
     int ret;

-    job_ref(job);
+    job_ref_locked(job);

     if (finish) {
         finish(job, &local_err);
     }
     if (local_err) {
         error_propagate(errp, local_err);
-        job_unref(job);
+        job_unref_locked(job);
         return -EBUSY;
     }

+    job_unlock();
     AIO_WAIT_WHILE(job->aio_context,
                    (job_enter(job), !job_is_completed(job)));
+    job_lock();

-    ret = (job_is_cancelled(job) && job->ret == 0) ? -ECANCELED : job->ret;
-    job_unref(job);
+    ret = (job_is_cancelled_locked(job) && job->ret == 0)
+          ? -ECANCELED : job->ret;
+    job_unref_locked(job);
     return ret;
 }
+
+int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
+{
+    JOB_LOCK_GUARD();
+    return job_finish_sync_locked(job, finish, errp);
+}
--
2.37.3
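
Every conversion in the patch above follows the same mechanical shape: the old
function body moves into a _locked() variant that expects job_mutex to be held,
and the old name becomes a thin wrapper that takes the lock itself. As a
generic sketch (job_foo is a placeholder, not a function from this series):

    /* Called with job_mutex held */
    void job_foo_locked(Job *job)
    {
        /* touch job fields, call other *_locked() helpers */
    }

    /* Public entry point: takes job_mutex around the locked variant */
    void job_foo(Job *job)
    {
        JOB_LOCK_GUARD();
        job_foo_locked(job);
    }

The exception is code that re-enters driver callbacks: job_prepare_locked()
and job_complete_locked() drop the mutex with job_unlock()/job_lock() around
the driver call, because the callback may take the job lock itself.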
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

This comment applies more to job.c; it was only left in blockjob.c
because in the past the whole job logic was implemented there.

Note: at this stage, job_{lock/unlock} and job lock guard macros
are *nop*.

No functional change intended.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20220926093214.506243-7-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 blockjob.c | 20 --------------------
 job.c | 16 ++++++++++++++++
 2 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/main-loop.h"
 #include "qemu/timer.h"

-/*
- * The block job API is composed of two categories of functions.
- *
- * The first includes functions used by the monitor. The monitor is
- * peculiar in that it accesses the block job list with block_job_get, and
- * therefore needs consistency across block_job_get and the actual operation
- * (e.g. block_job_set_speed). The consistency is achieved with
- * aio_context_acquire/release. These functions are declared in blockjob.h.
- *
- * The second includes functions used by the block job drivers and sometimes
- * by the core block layer. These do not care about locking, because the
- * whole coroutine runs under the AioContext lock, and are declared in
- * blockjob_int.h.
- */
-
 static bool is_block_job(Job *job)
 {
     return job_type(job) == JOB_TYPE_BACKUP ||
@@ -XXX,XX +XXX,XX @@ static void block_job_event_ready(Notifier *n, void *opaque)
 }


-/*
- * API for block job drivers and the block layer. These functions are
- * declared in blockjob_int.h.
- */
-
 void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                        JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                        uint64_t shared_perm, int64_t speed, int flags,
diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@
 #include "trace/trace-root.h"
 #include "qapi/qapi-events-job.h"

+/*
+ * The job API is composed of two categories of functions.
+ *
+ * The first includes functions used by the monitor. The monitor is
+ * peculiar in that it accesses the job list with job_get, and
+ * therefore needs consistency across job_get and the actual operation
+ * (e.g. job_user_cancel). To achieve this consistency, the caller
+ * calls job_lock/job_unlock itself around the whole operation.
+ *
+ *
+ * The second includes functions used by the job drivers and sometimes
+ * by the core block layer. These delegate the locking to the callee instead.
+ *
+ * TODO Actually make this true
+ */
+
 /*
  * job_mutex protects the jobs list, but also makes the
  * struct job fields thread-safe.
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Just as done with job.h, create _locked() functions in blockjob.h.

These functions will later be useful when the caller has already taken
the lock. All blockjob _locked functions call job _locked functions.

Note: at this stage, job_{lock/unlock} and job lock guard macros
are *nop*.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20220926093214.506243-8-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
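
The intended usage, once callers are converted later in the series, is to
combine lookup and query under a single guard. A minimal sketch (illustration
only; "id" stands for whatever job ID the caller holds):

    WITH_JOB_LOCK_GUARD() {
        BlockJob *bjob = block_job_get_locked(id);

        if (bjob) {
            info = block_job_query_locked(bjob, errp);
        }
    }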
 include/block/blockjob.h | 18 ++++++++++++++
 blockjob.c | 52 ++++++++++++++++++++++++++++++++--------
 2 files changed, 60 insertions(+), 10 deletions(-)

diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
  */
 BlockJob *block_job_next(BlockJob *job);

+/* Same as block_job_next(), but called with job lock held. */
+BlockJob *block_job_next_locked(BlockJob *job);
+
 /**
  * block_job_get:
  * @id: The id of the block job.
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next(BlockJob *job);
  */
 BlockJob *block_job_get(const char *id);

+/* Same as block_job_get(), but called with job lock held. */
+BlockJob *block_job_get_locked(const char *id);
+
 /**
  * block_job_add_bdrv:
  * @job: A block job
@@ -XXX,XX +XXX,XX @@ bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs);
  */
 bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);

+/*
+ * Same as block_job_set_speed(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp);
+
 /**
  * block_job_query:
  * @job: The job to get information about.
@@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
  */
 BlockJobInfo *block_job_query(BlockJob *job, Error **errp);

+/* Same as block_job_query(), but called with job lock held. */
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp);
+
 /**
  * block_job_iostatus_reset:
  * @job: The job whose I/O status should be reset.
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
  */
 void block_job_iostatus_reset(BlockJob *job);

+/* Same as block_job_iostatus_reset(), but called with job lock held. */
+void block_job_iostatus_reset_locked(BlockJob *job);
+
 /*
  * block_job_get_aio_context:
  *
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ static bool is_block_job(Job *job)
            job_type(job) == JOB_TYPE_STREAM;
 }

-BlockJob *block_job_next(BlockJob *bjob)
+BlockJob *block_job_next_locked(BlockJob *bjob)
 {
     Job *job = bjob ? &bjob->job : NULL;
     GLOBAL_STATE_CODE();

     do {
-        job = job_next(job);
+        job = job_next_locked(job);
     } while (job && !is_block_job(job));

     return job ? container_of(job, BlockJob, job) : NULL;
 }

-BlockJob *block_job_get(const char *id)
+BlockJob *block_job_next(BlockJob *bjob)
 {
-    Job *job = job_get(id);
+    JOB_LOCK_GUARD();
+    return block_job_next_locked(bjob);
+}
+
+BlockJob *block_job_get_locked(const char *id)
+{
+    Job *job = job_get_locked(id);
     GLOBAL_STATE_CODE();

     if (job && is_block_job(job)) {
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_get(const char *id)
     }
 }

+BlockJob *block_job_get(const char *id)
+{
+    JOB_LOCK_GUARD();
+    return block_job_get_locked(id);
+}
+
 void block_job_free(Job *job)
 {
     BlockJob *bjob = container_of(job, BlockJob, job);
@@ -XXX,XX +XXX,XX @@ static bool job_timer_pending(Job *job)
     return timer_pending(&job->sleep_timer);
 }

-bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
 {
     const BlockJobDriver *drv = block_job_driver(job);
     int64_t old_speed = job->speed;

     GLOBAL_STATE_CODE();

-    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
+    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
         return false;
     }
     if (speed < 0) {
@@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     job->speed = speed;

     if (drv->set_speed) {
+        job_unlock();
         drv->set_speed(job, speed);
+        job_lock();
     }

     if (speed && speed <= old_speed) {
@@ -XXX,XX +XXX,XX @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
     }

     /* kick only if a timer is pending */
-    job_enter_cond(&job->job, job_timer_pending);
+    job_enter_cond_locked(&job->job, job_timer_pending);

     return true;
 }

+bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    JOB_LOCK_GUARD();
+    return block_job_set_speed_locked(job, speed, errp);
+}
+
 int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
 {
     IO_CODE();
     return ratelimit_calculate_delay(&job->limit, n);
 }

-BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
 {
     BlockJobInfo *info;
     uint64_t progress_current, progress_total;
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     info->len = progress_total;
     info->speed = job->speed;
     info->io_status = job->iostatus;
-    info->ready = job_is_ready(&job->job),
+    info->ready = job_is_ready_locked(&job->job),
     info->status = job->job.status;
     info->auto_finalize = job->job.auto_finalize;
     info->auto_dismiss = job->job.auto_dismiss;
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     return info;
 }

+BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
+{
+    JOB_LOCK_GUARD();
+    return block_job_query_locked(job, errp);
+}
+
 static void block_job_iostatus_set_err(BlockJob *job, int error)
 {
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
@@ -XXX,XX +XXX,XX @@ fail:
     return NULL;
 }

-void block_job_iostatus_reset(BlockJob *job)
+void block_job_iostatus_reset_locked(BlockJob *job)
 {
     GLOBAL_STATE_CODE();
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
@@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset(BlockJob *job)
     job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
 }

+void block_job_iostatus_reset(BlockJob *job)
+{
+    JOB_LOCK_GUARD();
+    block_job_iostatus_reset_locked(job);
+}
+
 void block_job_user_resume(Job *job)
 {
     BlockJob *bjob = container_of(job, BlockJob, job);
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Both blockdev.c and job-qmp.c have TOC/TOU (time-of-check/time-of-use)
conditions, because they first search for the job and then perform an
action on it. Therefore, we need to do the search + action under the
same job mutex critical section.

Note: at this stage, job_{lock/unlock} and job lock guard macros
are *nop*.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220926093214.506243-9-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
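
The shape of the fix is the same in every QMP handler below; a condensed
before/after sketch using qmp_job_pause() as the example (not literal patch
text):

    /* Before: lookup and action are separate critical sections, so the
     * job can change state in between (TOC/TOU). */
    job = find_job(id, &aio_context, errp);
    if (job) {
        job_user_pause(job, errp);
    }

    /* After: one critical section covers both lookup and action. */
    JOB_LOCK_GUARD();
    job = find_job_locked(id, &aio_context, errp);
    if (job) {
        job_user_pause_locked(job, errp);
    }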
 blockdev.c | 67 +++++++++++++++++++++++++++++++++++++-----------------
 job-qmp.c | 57 ++++++++++++++++++++++++++++++++--------------
 2 files changed, 86 insertions(+), 38 deletions(-)

diff --git a/blockdev.c b/blockdev.c
index XXXXXXX..XXXXXXX 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -XXX,XX +XXX,XX @@ out:
     aio_context_release(aio_context);
 }

-/* Get a block job using its ID and acquire its AioContext */
-static BlockJob *find_block_job(const char *id, AioContext **aio_context,
-                                Error **errp)
+/*
+ * Get a block job using its ID and acquire its AioContext.
+ * Called with job_mutex held.
+ */
+static BlockJob *find_block_job_locked(const char *id,
+                                       AioContext **aio_context,
+                                       Error **errp)
 {
     BlockJob *job;

@@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context,

     *aio_context = NULL;

-    job = block_job_get(id);
+    job = block_job_get_locked(id);

     if (!job) {
         error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE,
@@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job(const char *id, AioContext **aio_context,
 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);

     if (!job) {
         return;
     }

-    block_job_set_speed(job, speed, errp);
+    block_job_set_speed_locked(job, speed, errp);
     aio_context_release(aio_context);
 }

@@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device,
                           bool has_force, bool force, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device,
         force = false;
     }

-    if (job_user_paused(&job->job) && !force) {
+    if (job_user_paused_locked(&job->job) && !force) {
         error_setg(errp, "The block job for device '%s' is currently paused",
                    device);
         goto out;
     }

     trace_qmp_block_job_cancel(job);
-    job_user_cancel(&job->job, force, errp);
+    job_user_cancel_locked(&job->job, force, errp);
 out:
     aio_context_release(aio_context);
 }
@@ -XXX,XX +XXX,XX @@ out:
 void qmp_block_job_pause(const char *device, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_block_job_pause(job);
-    job_user_pause(&job->job, errp);
+    job_user_pause_locked(&job->job, errp);
     aio_context_release(aio_context);
 }

 void qmp_block_job_resume(const char *device, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_block_job_resume(job);
-    job_user_resume(&job->job, errp);
+    job_user_resume_locked(&job->job, errp);
     aio_context_release(aio_context);
 }

 void qmp_block_job_complete(const char *device, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(device, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(device, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_block_job_complete(job);
-    job_complete(&job->job, errp);
+    job_complete_locked(&job->job, errp);
     aio_context_release(aio_context);
 }

 void qmp_block_job_finalize(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *job = find_block_job(id, &aio_context, errp);
+    BlockJob *job;
+
+    JOB_LOCK_GUARD();
+    job = find_block_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_block_job_finalize(job);
-    job_ref(&job->job);
-    job_finalize(&job->job, errp);
+    job_ref_locked(&job->job);
+    job_finalize_locked(&job->job, errp);

     /*
      * Job's context might have changed via job_finalize (and job_txn_apply
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_finalize(const char *id, Error **errp)
      * one.
      */
     aio_context = block_job_get_aio_context(job);
-    job_unref(&job->job);
+    job_unref_locked(&job->job);
     aio_context_release(aio_context);
 }

 void qmp_block_job_dismiss(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    BlockJob *bjob = find_block_job(id, &aio_context, errp);
+    BlockJob *bjob;
     Job *job;

+    JOB_LOCK_GUARD();
+    bjob = find_block_job_locked(id, &aio_context, errp);
+
     if (!bjob) {
         return;
     }

     trace_qmp_block_job_dismiss(bjob);
     job = &bjob->job;
-    job_dismiss(&job, errp);
+    job_dismiss_locked(&job, errp);
     aio_context_release(aio_context);
 }

diff --git a/job-qmp.c b/job-qmp.c
index XXXXXXX..XXXXXXX 100644
--- a/job-qmp.c
+++ b/job-qmp.c
@@ -XXX,XX +XXX,XX @@
 #include "qapi/error.h"
 #include "trace/trace-root.h"

-/* Get a job using its ID and acquire its AioContext */
-static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
+/*
+ * Get a job using its ID and acquire its AioContext.
+ * Called with job_mutex held.
+ */
+static Job *find_job_locked(const char *id,
+                            AioContext **aio_context,
+                            Error **errp)
 {
     Job *job;

     *aio_context = NULL;

-    job = job_get(id);
+    job = job_get_locked(id);
     if (!job) {
         error_setg(errp, "Job not found");
         return NULL;
@@ -XXX,XX +XXX,XX @@ static Job *find_job(const char *id, AioContext **aio_context, Error **errp)
 void qmp_job_cancel(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_job_cancel(job);
-    job_user_cancel(job, true, errp);
+    job_user_cancel_locked(job, true, errp);
     aio_context_release(aio_context);
 }

 void qmp_job_pause(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_job_pause(job);
-    job_user_pause(job, errp);
+    job_user_pause_locked(job, errp);
     aio_context_release(aio_context);
 }

 void qmp_job_resume(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_job_resume(job);
-    job_user_resume(job, errp);
+    job_user_resume_locked(job, errp);
     aio_context_release(aio_context);
 }

 void qmp_job_complete(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_job_complete(job);
-    job_complete(job, errp);
+    job_complete_locked(job, errp);
     aio_context_release(aio_context);
 }

 void qmp_job_finalize(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_job_finalize(job);
-    job_ref(job);
-    job_finalize(job, errp);
+    job_ref_locked(job);
+    job_finalize_locked(job, errp);

     /*
      * Job's context might have changed via job_finalize (and job_txn_apply
@@ -XXX,XX +XXX,XX @@ void qmp_job_finalize(const char *id, Error **errp)
      * one.
      */
     aio_context = job->aio_context;
-    job_unref(job);
+    job_unref_locked(job);
     aio_context_release(aio_context);
 }

 void qmp_job_dismiss(const char *id, Error **errp)
 {
     AioContext *aio_context;
-    Job *job = find_job(id, &aio_context, errp);
+    Job *job;
+
+    JOB_LOCK_GUARD();
+    job = find_job_locked(id, &aio_context, errp);

     if (!job) {
         return;
     }

     trace_qmp_job_dismiss(job);
-    job_dismiss(&job, errp);
+    job_dismiss_locked(&job, errp);
     aio_context_release(aio_context);
 }

--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Add missing job synchronization in the unit tests, with
explicit locks.

We deliberately use _locked functions wrapped by a guard instead of
the normal calls, because the normal calls will be removed in the
future once their only remaining users are the tests.

In other words, if a function like job_pause() is (or will be) only
used in tests, it is not worth keeping it just to avoid writing

WITH_JOB_LOCK_GUARD(){
    job_pause_locked();
}

so we use the guard directly instead.

Note: at this stage, job_{lock/unlock} and job lock guard macros
are *nop*.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220926093214.506243-10-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
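
The recurring pattern in the tests below is a guard-scoped read (or read plus
user action) of job state; where only a single field is checked, the patch
adds a small helper. Condensed from the hunks that follow:

    static void assert_job_status_is(Job *job, int status)
    {
        WITH_JOB_LOCK_GUARD() {
            assert(job->status == status);
        }
    }

    /* a check and a user action must share one critical section */
    WITH_JOB_LOCK_GUARD() {
        assert(job->status == JOB_STATUS_RUNNING);
        job_user_pause_locked(job, &error_abort);
    }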
 tests/unit/test-bdrv-drain.c | 76 ++++++++++++--------
 tests/unit/test-block-iothread.c | 8 ++-
 tests/unit/test-blockjob-txn.c | 24 ++++---
 tests/unit/test-blockjob.c | 115 +++++++++++++++++++------------
 4 files changed, 140 insertions(+), 83 deletions(-)

diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         }
     }

-    g_assert_cmpint(job->job.pause_count, ==, 0);
-    g_assert_false(job->job.paused);
-    g_assert_true(tjob->running);
-    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    WITH_JOB_LOCK_GUARD() {
+        g_assert_cmpint(job->job.pause_count, ==, 0);
+        g_assert_false(job->job.paused);
+        g_assert_true(tjob->running);
+        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    }

     do_drain_begin_unlocked(drain_type, drain_bs);

-    if (drain_type == BDRV_DRAIN_ALL) {
-        /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->job.pause_count, ==, 2);
-    } else {
-        g_assert_cmpint(job->job.pause_count, ==, 1);
+    WITH_JOB_LOCK_GUARD() {
+        if (drain_type == BDRV_DRAIN_ALL) {
+            /* bdrv_drain_all() drains both src and target */
+            g_assert_cmpint(job->job.pause_count, ==, 2);
+        } else {
+            g_assert_cmpint(job->job.pause_count, ==, 1);
+        }
+        g_assert_true(job->job.paused);
+        g_assert_false(job->job.busy); /* The job is paused */
     }
-    g_assert_true(job->job.paused);
-    g_assert_false(job->job.busy); /* The job is paused */

     do_drain_end_unlocked(drain_type, drain_bs);

     if (use_iothread) {
-        /* paused is reset in the I/O thread, wait for it */
+        /*
+         * Here we are waiting for the paused status to change,
+         * so don't bother protecting the read every time.
+         *
+         * paused is reset in the I/O thread, wait for it
+         */
         while (job->job.paused) {
             aio_poll(qemu_get_aio_context(), false);
         }
     }

-    g_assert_cmpint(job->job.pause_count, ==, 0);
-    g_assert_false(job->job.paused);
-    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    WITH_JOB_LOCK_GUARD() {
+        g_assert_cmpint(job->job.pause_count, ==, 0);
+        g_assert_false(job->job.paused);
+        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    }

     do_drain_begin_unlocked(drain_type, target);

-    if (drain_type == BDRV_DRAIN_ALL) {
-        /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->job.pause_count, ==, 2);
-    } else {
-        g_assert_cmpint(job->job.pause_count, ==, 1);
+    WITH_JOB_LOCK_GUARD() {
+        if (drain_type == BDRV_DRAIN_ALL) {
+            /* bdrv_drain_all() drains both src and target */
+            g_assert_cmpint(job->job.pause_count, ==, 2);
+        } else {
+            g_assert_cmpint(job->job.pause_count, ==, 1);
+        }
+        g_assert_true(job->job.paused);
+        g_assert_false(job->job.busy); /* The job is paused */
     }
-    g_assert_true(job->job.paused);
-    g_assert_false(job->job.busy); /* The job is paused */

     do_drain_end_unlocked(drain_type, target);

     if (use_iothread) {
-        /* paused is reset in the I/O thread, wait for it */
+        /*
+         * Here we are waiting for the paused status to change,
+         * so don't bother protecting the read every time.
+         *
+         * paused is reset in the I/O thread, wait for it
+         */
         while (job->job.paused) {
             aio_poll(qemu_get_aio_context(), false);
         }
     }

-    g_assert_cmpint(job->job.pause_count, ==, 0);
-    g_assert_false(job->job.paused);
-    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    WITH_JOB_LOCK_GUARD() {
+        g_assert_cmpint(job->job.pause_count, ==, 0);
+        g_assert_false(job->job.paused);
+        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    }

     aio_context_acquire(ctx);
-    ret = job_complete_sync(&job->job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        ret = job_complete_sync_locked(&job->job, &error_abort);
+    }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

     if (use_iothread) {
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -XXX,XX +XXX,XX @@ static void test_attach_blockjob(void)
     }

     aio_context_acquire(ctx);
-    job_complete_sync(&tjob->common.job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        job_complete_sync_locked(&tjob->common.job, &error_abort);
+    }
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);

@@ -XXX,XX +XXX,XX @@ static void test_propagate_mirror(void)
                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                  false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                  &error_abort);
-    job = job_get("job0");
+    WITH_JOB_LOCK_GUARD() {
+        job = job_get_locked("job0");
+    }
     filter = bdrv_find_node("filter_node");

     /* Change the AioContext of src */
diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-blockjob-txn.c
+++ b/tests/unit/test-blockjob-txn.c
@@ -XXX,XX +XXX,XX @@ static void test_single_job(int expected)
     job = test_block_job_start(1, true, expected, &result, txn);
     job_start(&job->job);

-    if (expected == -ECANCELED) {
-        job_cancel(&job->job, false);
+    WITH_JOB_LOCK_GUARD() {
+        if (expected == -ECANCELED) {
+            job_cancel_locked(&job->job, false);
+        }
     }

     while (result == -EINPROGRESS) {
@@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2)
     /* Release our reference now to trigger as many nice
      * use-after-free bugs as possible.
      */
-    job_txn_unref(txn);
+    WITH_JOB_LOCK_GUARD() {
+        job_txn_unref_locked(txn);

-    if (expected1 == -ECANCELED) {
-        job_cancel(&job1->job, false);
-    }
-    if (expected2 == -ECANCELED) {
-        job_cancel(&job2->job, false);
+        if (expected1 == -ECANCELED) {
+            job_cancel_locked(&job1->job, false);
+        }
+        if (expected2 == -ECANCELED) {
+            job_cancel_locked(&job2->job, false);
+        }
     }

     while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
@@ -XXX,XX +XXX,XX @@ static void test_pair_jobs_fail_cancel_race(void)
     job_start(&job1->job);
     job_start(&job2->job);

-    job_cancel(&job1->job, false);
+    WITH_JOB_LOCK_GUARD() {
+        job_cancel_locked(&job1->job, false);
+    }

     /* Now make job2 finish before the main loop kicks jobs. This simulates
      * the race between a pending kick and another job completing.
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -XXX,XX +XXX,XX @@ static CancelJob *create_common(Job **pjob)
     bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
                   JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
     job = &bjob->job;
-    job_ref(job);
-    assert(job->status == JOB_STATUS_CREATED);
+    WITH_JOB_LOCK_GUARD() {
+        job_ref_locked(job);
+        assert(job->status == JOB_STATUS_CREATED);
+    }
+
     s = container_of(bjob, CancelJob, common);
     s->blk = blk;

@@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s)
     aio_context_acquire(ctx);

     job_cancel_sync(&job->job, true);
-    if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
-        Job *dummy = &job->job;
-        job_dismiss(&dummy, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
+            Job *dummy = &job->job;
+            job_dismiss_locked(&dummy, &error_abort);
+        }
+        assert(job->job.status == JOB_STATUS_NULL);
+        job_unref_locked(&job->job);
     }
-    assert(job->job.status == JOB_STATUS_NULL);
-    job_unref(&job->job);
     destroy_blk(blk);

     aio_context_release(ctx);
@@ -XXX,XX +XXX,XX @@ static void test_cancel_created(void)
     cancel_common(s);
 }

+static void assert_job_status_is(Job *job, int status)
+{
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == status);
+    }
+}
+
 static void test_cancel_running(void)
 {
     Job *job;
@@ -XXX,XX +XXX,XX @@ static void test_cancel_running(void)
     s = create_common(&job);

     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_cancel_paused(void)
     s = create_common(&job);

     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
-
-    job_user_pause(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_RUNNING);
+        job_user_pause_locked(job, &error_abort);
+    }
     job_enter(job);
-    assert(job->status == JOB_STATUS_PAUSED);
+    assert_job_status_is(job, JOB_STATUS_PAUSED);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_cancel_ready(void)
     s = create_common(&job);

     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);

     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
+    assert_job_status_is(job, JOB_STATUS_READY);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_cancel_standby(void)
     s = create_common(&job);

     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);

     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
-
-    job_user_pause(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_READY);
+        job_user_pause_locked(job, &error_abort);
+    }
     job_enter(job);
-    assert(job->status == JOB_STATUS_STANDBY);
+    assert_job_status_is(job, JOB_STATUS_STANDBY);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_cancel_pending(void)
     s = create_common(&job);

     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);

     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
-
-    job_complete(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_READY);
+        job_complete_locked(job, &error_abort);
+    }
     job_enter(job);
     while (!job->deferred_to_main_loop) {
         aio_poll(qemu_get_aio_context(), true);
     }
-    assert(job->status == JOB_STATUS_READY);
+    assert_job_status_is(job, JOB_STATUS_READY);
     aio_poll(qemu_get_aio_context(), true);
-    assert(job->status == JOB_STATUS_PENDING);
+    assert_job_status_is(job, JOB_STATUS_PENDING);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void)
     s = create_common(&job);

     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);

     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
-
-    job_complete(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_READY);
+        job_complete_locked(job, &error_abort);
+    }
     job_enter(job);
     while (!job->deferred_to_main_loop) {
         aio_poll(qemu_get_aio_context(), true);
     }
-    assert(job->status == JOB_STATUS_READY);
+    assert_job_status_is(job, JOB_STATUS_READY);
     aio_poll(qemu_get_aio_context(), true);
-    assert(job->status == JOB_STATUS_PENDING);
+    assert_job_status_is(job, JOB_STATUS_PENDING);

     aio_context_acquire(job->aio_context);
-    job_finalize(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        job_finalize_locked(job, &error_abort);
+    }
     aio_context_release(job->aio_context);
-    assert(job->status == JOB_STATUS_CONCLUDED);
+    assert_job_status_is(job, JOB_STATUS_CONCLUDED);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void)
     bjob = mk_job(blk, "job", &test_yielding_driver, true,
                   JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
     job = &bjob->job;
-    assert(job->status == JOB_STATUS_CREATED);
+    assert_job_status_is(job, JOB_STATUS_CREATED);

     /* Wait for the job to become READY */
     job_start(job);
     aio_context_acquire(ctx);
+    /*
+     * Here we are waiting for the status to change, so don't bother
+     * protecting the read every time.
+     */
     AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
     aio_context_release(ctx);

     /* Begin the drained section, pausing the job */
     bdrv_drain_all_begin();
-    assert(job->status == JOB_STATUS_STANDBY);
+    assert_job_status_is(job, JOB_STATUS_STANDBY);
+
     /* Lock the IO thread to prevent the job from being run */
     aio_context_acquire(ctx);
     /* This will schedule the job to resume it */
     bdrv_drain_all_end();

-    /* But the job cannot run, so it will remain on standby */
-    assert(job->status == JOB_STATUS_STANDBY);
+    WITH_JOB_LOCK_GUARD() {
+        /* But the job cannot run, so it will remain on standby */
+        assert(job->status == JOB_STATUS_STANDBY);

-    /* Even though the job is on standby, this should work */
-    job_complete(job, &error_abort);
+        /* Even though the job is on standby, this should work */
+        job_complete_locked(job, &error_abort);

-    /* The test is done now, clean up. */
-    job_finish_sync(job, NULL, &error_abort);
-    assert(job->status == JOB_STATUS_PENDING);
+        /* The test is done now, clean up. */
+        job_finish_sync_locked(job, NULL, &error_abort);
+        assert(job->status == JOB_STATUS_PENDING);

-    job_finalize(job, &error_abort);
-    assert(job->status == JOB_STATUS_CONCLUDED);
+        job_finalize_locked(job, &error_abort);
+        assert(job->status == JOB_STATUS_CONCLUDED);

-    job_dismiss(&job, &error_abort);
+        job_dismiss_locked(&job, &error_abort);
+    }

     destroy_blk(blk);
     aio_context_release(ctx);
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Once the job lock is used and the AioContext lock is removed, mirror
has to perform job operations under the same critical section.

Note: at this stage, job_{lock/unlock} and job lock guard macros
are *nop*.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-11-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
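
The point of the mirror_complete() hunk below is that the paused check and the
kick must sit in the same critical section, so the job cannot be paused
between them. The NULL condition makes job_enter_cond_locked() behave like an
unconditional job_enter():

    WITH_JOB_LOCK_GUARD() {
        if (!job->paused) {
            job_enter_cond_locked(job, NULL);
        }
    }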
 block/mirror.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/block/mirror.c b/block/mirror.c
index XXXXXXX..XXXXXXX 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -XXX,XX +XXX,XX @@ static void mirror_complete(Job *job, Error **errp)
     s->should_complete = true;

     /* If the job is paused, it will be re-entered when it is resumed */
-    if (!job->paused) {
-        job_enter(job);
+    WITH_JOB_LOCK_GUARD() {
+        if (!job->paused) {
+            job_enter_cond_locked(job, NULL);
+        }
     }
 }

@@ -XXX,XX +XXX,XX @@ static bool mirror_drained_poll(BlockJob *job)
      * from one of our own drain sections, to avoid a deadlock waiting for
      * ourselves.
      */
-    if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) {
-        return true;
+    WITH_JOB_LOCK_GUARD() {
+        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
+            && !s->in_drain) {
+            return true;
+        }
     }

     return !!s->in_flight;
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Now that the API also offers _locked() functions, take advantage
of them and give the caller control to take the lock once and call
the _locked() functions.

This makes sense especially when we have for loops, because it
makes no sense to have:

for(job = job_next(); ...)

where each job_next() takes the lock internally.
Instead we want:

JOB_LOCK_GUARD();
for(job = job_next_locked(); ...)

In addition, protect also direct field accesses, by either creating a
new critical section or widening the existing ones.

Note: at this stage, job_{lock/unlock} and job lock guard macros
are *nop*.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-12-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
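
Spelled out as real code, the loop pattern reads as below (modelled on the
qmp_query_jobs() hunk in this patch; the loop body is elided):

    JOB_LOCK_GUARD();

    for (job = job_next_locked(NULL); job; job = job_next_locked(job)) {
        /* the job list cannot change under us while job_mutex is held */
    }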
 block.c | 17 ++++++++++-------
 blockdev.c | 14 ++++++++++----
 blockjob.c | 35 ++++++++++++++++++++++-------------
 job-qmp.c | 9 ++++++---
 monitor/qmp-cmds.c | 7 +++++--
 qemu-img.c | 15 ++++++++++-----
 6 files changed, 63 insertions(+), 34 deletions(-)

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ static void bdrv_close(BlockDriverState *bs)

 void bdrv_close_all(void)
 {
-    assert(job_next(NULL) == NULL);
     GLOBAL_STATE_CODE();
+    assert(job_next(NULL) == NULL);

     /* Drop references from requests still in flight, such as canceled block
      * jobs whose AIO context has not been polled yet */
@@ -XXX,XX +XXX,XX @@ XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp)
         }
     }

-    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
-        GSList *el;
+    WITH_JOB_LOCK_GUARD() {
+        for (job = block_job_next_locked(NULL); job;
+             job = block_job_next_locked(job)) {
+            GSList *el;

-        xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
-                            job->job.id);
-        for (el = job->nodes; el; el = el->next) {
-            xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
+            xdbg_graph_add_node(gr, job, X_DBG_BLOCK_GRAPH_NODE_TYPE_BLOCK_JOB,
+                                job->job.id);
+            for (el = job->nodes; el; el = el->next) {
+                xdbg_graph_add_edge(gr, job, (BdrvChild *)el->data);
+            }
         }
     }

diff --git a/blockdev.c b/blockdev.c
index XXXXXXX..XXXXXXX 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk)
         return;
     }

-    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
+    JOB_LOCK_GUARD();
+
+    for (job = block_job_next_locked(NULL); job;
+         job = block_job_next_locked(job)) {
         if (block_job_has_bdrv(job, blk_bs(blk))) {
             AioContext *aio_context = job->job.aio_context;
             aio_context_acquire(aio_context);

-            job_cancel(&job->job, false);
+            job_cancel_locked(&job->job, false);

             aio_context_release(aio_context);
         }
@@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
     BlockJobInfoList *head = NULL, **tail = &head;
     BlockJob *job;

-    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
+    JOB_LOCK_GUARD();
+
+    for (job = block_job_next_locked(NULL); job;
+         job = block_job_next_locked(job)) {
         BlockJobInfo *value;
         AioContext *aio_context;

@@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
         }
         aio_context = block_job_get_aio_context(job);
         aio_context_acquire(aio_context);
-        value = block_job_query(job, errp);
+        value = block_job_query_locked(job, errp);
         aio_context_release(aio_context);
         if (!value) {
             qapi_free_BlockJobInfoList(head);
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ static bool child_job_drained_poll(BdrvChild *c)
     /* An inactive or completed job doesn't have any pending requests. Jobs
      * with !job->busy are either already paused or have a pause point after
      * being reentered, so no job driver code will run before they pause. */
-    if (!job->busy || job_is_completed(job)) {
-        return false;
+    WITH_JOB_LOCK_GUARD() {
+        if (!job->busy || job_is_completed_locked(job)) {
+            return false;
+        }
     }

     /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     job->ready_notifier.notify = block_job_event_ready;
     job->idle_notifier.notify = block_job_on_idle;

-    notifier_list_add(&job->job.on_finalize_cancelled,
-                      &job->finalize_cancelled_notifier);
-    notifier_list_add(&job->job.on_finalize_completed,
-                      &job->finalize_completed_notifier);
-    notifier_list_add(&job->job.on_pending, &job->pending_notifier);
-    notifier_list_add(&job->job.on_ready, &job->ready_notifier);
-    notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+    WITH_JOB_LOCK_GUARD() {
+        notifier_list_add(&job->job.on_finalize_cancelled,
+                          &job->finalize_cancelled_notifier);
+        notifier_list_add(&job->job.on_finalize_completed,
+                          &job->finalize_completed_notifier);
+        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
+        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
+        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+    }

     error_setg(&job->blocker, "block device is in use by block job: %s",
                job_type_str(&job->job));
@@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                         action);
     }
     if (action == BLOCK_ERROR_ACTION_STOP) {
-        if (!job->job.user_paused) {
-            job_pause(&job->job);
-            /* make the pause user visible, which will be resumed from QMP. */
-            job->job.user_paused = true;
+        WITH_JOB_LOCK_GUARD() {
+            if (!job->job.user_paused) {
+                job_pause_locked(&job->job);
+                /*
+                 * make the pause user visible, which will be
+                 * resumed from QMP.
+                 */
+                job->job.user_paused = true;
+            }
         }
         block_job_iostatus_set_err(job, error);
     }
diff --git a/job-qmp.c b/job-qmp.c
index XXXXXXX..XXXXXXX 100644
--- a/job-qmp.c
+++ b/job-qmp.c
@@ -XXX,XX +XXX,XX @@ void qmp_job_dismiss(const char *id, Error **errp)
     aio_context_release(aio_context);
 }

-static JobInfo *job_query_single(Job *job, Error **errp)
+/* Called with job_mutex held. */
+static JobInfo *job_query_single_locked(Job *job, Error **errp)
 {
     JobInfo *info;
     uint64_t progress_current;
@@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp)
     JobInfoList *head = NULL, **tail = &head;
     Job *job;

-    for (job = job_next(NULL); job; job = job_next(job)) {
+    JOB_LOCK_GUARD();
+
+    for (job = job_next_locked(NULL); job; job = job_next_locked(job)) {
         JobInfo *value;
         AioContext *aio_context;

@@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp)
         }
         aio_context = job->aio_context;
         aio_context_acquire(aio_context);
-        value = job_query_single(job, errp);
+        value = job_query_single_locked(job, errp);
         aio_context_release(aio_context);
         if (!value) {
             qapi_free_JobInfoList(head);
diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c
index XXXXXXX..XXXXXXX 100644
--- a/monitor/qmp-cmds.c
+++ b/monitor/qmp-cmds.c
@@ -XXX,XX +XXX,XX @@ void qmp_cont(Error **errp)
         blk_iostatus_reset(blk);
     }

-    for (job = block_job_next(NULL); job; job = block_job_next(job)) {
-        block_job_iostatus_reset(job);
+    WITH_JOB_LOCK_GUARD() {
+        for (job = block_job_next_locked(NULL); job;
+             job = block_job_next_locked(job)) {
+            block_job_iostatus_reset_locked(job);
+        }
     }

     /* Continuing after completed migration. Images have been inactivated to
diff --git a/qemu-img.c b/qemu-img.c
index XXXXXXX..XXXXXXX 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp)
     int ret = 0;

     aio_context_acquire(aio_context);
-    job_ref(&job->job);
+    job_lock();
+    job_ref_locked(&job->job);
     do {
         float progress = 0.0f;
+        job_unlock();
         aio_poll(aio_context, true);

         progress_get_snapshot(&job->job.progress, &progress_current,
@@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp)
             progress = (float)progress_current / progress_total * 100.f;
         }
         qemu_progress_print(progress, 0);
-    } while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
+        job_lock();
+    } while (!job_is_ready_locked(&job->job) &&
+             !job_is_completed_locked(&job->job));

-    if (!job_is_completed(&job->job)) {
-        ret = job_complete_sync(&job->job, errp);
+    if (!job_is_completed_locked(&job->job)) {
+        ret = job_complete_sync_locked(&job->job, errp);
     } else {
         ret = job->job.ret;
     }
-    job_unref(&job->job);
+    job_unref_locked(&job->job);
+    job_unlock();
     aio_context_release(aio_context);

     /* publish completion progress only when success */
--
2.37.3
diff view generated by jsdifflib
Deleted patch
From: Paolo Bonzini <pbonzini@redhat.com>

We want to make sure that access to job->aio_context is always done
under either the BQL or job_mutex. The problem is that using
aio_co_enter(job->aio_context, job->co) in job_start and job_enter_cond
makes the coroutine resume immediately, so we can't hold the job lock.
And caching the context is not safe either, as it might change.

job_start runs under the BQL, so it can freely read job->aio_context,
but job_enter_cond does not.
We want to avoid reading job->aio_context in job_enter_cond, therefore:
1) use aio_co_wake(), since it doesn't take an AioContext argument
   but uses job->co->ctx
2) detect a possible discrepancy between job->co->ctx and
   job->aio_context by checking, right after the coroutine resumes
   from yielding, whether job->aio_context has changed. If so,
   reschedule the coroutine to the new context.

Calling bdrv_try_set_aio_context() will issue the following calls
(simplified):
* in terms of bdrv callbacks:
  .drained_begin -> .set_aio_context -> .drained_end
* in terms of child_job functions:
  child_job_drained_begin -> child_job_set_aio_context -> child_job_drained_end
* in terms of job functions:
  job_pause_locked -> job_set_aio_context -> job_resume_locked

We can see that after setting the new aio_context, job_resume_locked
calls job_enter_cond again, which then invokes aio_co_wake(). But
while job->aio_context has been set in job_set_aio_context,
job->co->ctx has not changed, so the coroutine would be entered in
the wrong AioContext.

Using aio_co_schedule in job_resume_locked() might seem a valid
alternative, but the problem is that the bottom half resuming the
coroutine is not scheduled immediately, and if in the meantime another
bdrv_try_set_aio_context() is run (see test_propagate_mirror() in
test-block-iothread.c), we would have the first schedule in the
wrong AioContext, and the second set of drains wouldn't even manage
to schedule the coroutine, as job->busy would still be true from
the previous job_resume_locked().

The solution is to stick with aio_co_wake() and detect, every time
the coroutine resumes from yielding, whether job->aio_context
has changed. If so, we can reschedule it to the new context.

Check for the AioContext change in job_do_yield_locked because:
1) aio_co_reschedule_self must be called from the running coroutine
2) since child_job_set_aio_context allows changing the AioContext only
   while the job is paused, this is the exact place where the coroutine
   resumes, before running the JobDriver's code.
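
As an illustration of the pattern this introduces (a sketch only;
example_yield_point is a hypothetical name, the real logic lives in
job_do_yield_locked in the diff below):

    /* Sketch: a yield point that re-checks the AioContext on resume. */
    static void coroutine_fn example_yield_point(Job *job)
    {
        job_unlock();
        qemu_coroutine_yield();
        job_lock();

        /* The job may have been moved while the coroutine was yielded. */
        while (qemu_get_current_aio_context() != job->aio_context) {
            AioContext *next = job->aio_context;

            job_unlock();
            aio_co_reschedule_self(next); /* must run in this coroutine */
            job_lock();
        }
    }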
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220926093214.506243-13-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 job.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job))
     job->busy = true;
     real_job_unlock();
     job_unlock();
-    aio_co_enter(job->aio_context, job->co);
+    aio_co_wake(job->co);
     job_lock();
 }

@@ -XXX,XX +XXX,XX @@ void job_enter(Job *job)
  */
 static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns)
 {
+    AioContext *next_aio_context;
+
     real_job_lock();
     if (ns != -1) {
         timer_mod(&job->sleep_timer, ns);
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns)
     qemu_coroutine_yield();
     job_lock();

-    /* Set by job_enter_cond() before re-entering the coroutine.  */
+    next_aio_context = job->aio_context;
+    /*
+     * Coroutine has resumed, but in the meanwhile the job AioContext
+     * might have changed via bdrv_try_set_aio_context(), so we need to move
+     * the coroutine too in the new aiocontext.
+     */
+    while (qemu_get_current_aio_context() != next_aio_context) {
+        job_unlock();
+        aio_co_reschedule_self(next_aio_context);
+        job_lock();
+        next_aio_context = job->aio_context;
+    }
+
+    /* Set by job_enter_cond_locked() before re-entering the coroutine.  */
     assert(job->busy);
 }

--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

In order to make job->aio_context thread safe, implement a "fake
rwlock", where we allow reads under BQL *or* job_mutex held, but
writes only under BQL *and* job_mutex.

The only write we have is in child_job_set_aio_ctx, which always
happens under drain (so the job is paused).
For this reason, introduce job_set_aio_context and make sure that
the context is set under BQL, job_mutex and drain.
Also make sure all other places where the AioContext is read
are protected.

The reads in commit.c and mirror.c are actually safe, because they
are always done under BQL.

Note: at this stage, the job_{lock/unlock} and job lock guard macros
are *nop*.
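
As an illustrative sketch of the resulting rules (the reader helper
below is hypothetical; the writer side is the new job_set_aio_context()
in the diff):

    /* Readers may hold either the BQL or job_mutex. */
    static AioContext *example_read_job_ctx(Job *job)
    {
        AioContext *ctx;

        WITH_JOB_LOCK_GUARD() {
            ctx = job->aio_context;
        }
        return ctx;
    }

The single writer additionally runs under the BQL (GLOBAL_STATE_CODE())
and asserts that the job is quiescent.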
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20220926093214.506243-14-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/job.h  | 23 ++++++++++++++++++++---
 block/replication.c |  1 +
 blockjob.c          |  3 ++-
 job.c               | 12 ++++++++++++
 4 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/include/qemu/job.h b/include/qemu/job.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
     /* ProgressMeter API is thread-safe */
     ProgressMeter progress;

+    /**
+     * AioContext to run the job coroutine in.
+     * The job Aiocontext can be read when holding *either*
+     * the BQL (so we are in the main loop) or the job_mutex.
+     * It can only be written when we hold *both* BQL
+     * and the job_mutex.
+     */
+    AioContext *aio_context;

-    /** Protected by AioContext lock */

-    /** AioContext to run the job coroutine in */
-    AioContext *aio_context;
+    /** Protected by AioContext lock */

     /** Reference count of the block job */
     int refcnt;
@@ -XXX,XX +XXX,XX @@ int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp),
 int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
                            Error **errp);

+/**
+ * Sets the @job->aio_context.
+ * Called with job_mutex *not* held.
+ *
+ * This function must run in the main thread to protect against
+ * concurrent read in job_finish_sync_locked(), takes the job_mutex
+ * lock to protect against the read in job_do_yield_locked(), and must
+ * be called when the job is quiescent.
+ */
+void job_set_aio_context(Job *job, AioContext *ctx);
+
 #endif
diff --git a/block/replication.c b/block/replication.c
index XXXXXXX..XXXXXXX 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -XXX,XX +XXX,XX @@ static void replication_close(BlockDriverState *bs)
 {
     BDRVReplicationState *s = bs->opaque;
     Job *commit_job;
+    GLOBAL_STATE_CODE();

     if (s->stage == BLOCK_REPLICATION_RUNNING) {
         replication_stop(s->rs, false, NULL);
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
         bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
     }

-    job->job.aio_context = ctx;
+    job_set_aio_context(&job->job, ctx);
 }

 static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
 {
     BlockJob *job = c->opaque;
+    GLOBAL_STATE_CODE();

     return job->job.aio_context;
 }
diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@ Job *job_get(const char *id)
     return job_get_locked(id);
 }

+void job_set_aio_context(Job *job, AioContext *ctx)
+{
+    /* protect against read in job_finish_sync_locked and job_start */
+    GLOBAL_STATE_CODE();
+    /* protect against read in job_do_yield_locked */
+    JOB_LOCK_GUARD();
+    /* ensure the job is quiescent while the AioContext is changed */
+    assert(job->paused || job_is_completed_locked(job));
+    job->aio_context = ctx;
+}
+
 /* Called with job_mutex *not* held. */
 static void job_sleep_timer_cb(void *opaque)
 {
@@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job,
 {
     Error *local_err = NULL;
     int ret;
+    GLOBAL_STATE_CODE();

     job_ref_locked(job);

--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

The same job lock is also used to protect some of the BlockJob fields.
Categorize them just as was done in job.h.

Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20220926093214.506243-15-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/blockjob.h | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJobDriver BlockJobDriver;
  * Long-running operation on a BlockDriverState.
  */
 typedef struct BlockJob {
-    /** Data belonging to the generic Job infrastructure */
+    /**
+     * Data belonging to the generic Job infrastructure.
+     * Protected by job mutex.
+     */
     Job job;

-    /** Status that is published by the query-block-jobs QMP API */
+    /**
+     * Status that is published by the query-block-jobs QMP API.
+     * Protected by job mutex.
+     */
     BlockDeviceIoStatus iostatus;

-    /** Speed that was set with @block_job_set_speed.  */
+    /**
+     * Speed that was set with @block_job_set_speed.
+     * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
+     */
     int64_t speed;

-    /** Rate limiting data structure for implementing @speed. */
+    /**
+     * Rate limiting data structure for implementing @speed.
+     * RateLimit API is thread-safe.
+     */
     RateLimit limit;

-    /** Block other operations when block job is running */
+    /**
+     * Block other operations when block job is running.
+     * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
+     */
     Error *blocker;

+    /** All notifiers are set once in block_job_create() and never modified. */
+
     /** Called when a cancelled job is finalised. */
     Notifier finalize_cancelled_notifier;

@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
     /** Called when the job coroutine yields or terminates */
     Notifier idle_notifier;

-    /** BlockDriverStates that are involved in this block job */
+    /**
+     * BlockDriverStates that are involved in this block job.
+     * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
+     */
     GSList *nodes;
 } BlockJob;

--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

They are all called with job_lock held, from job_event_*_locked().

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220926093214.506243-16-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 blockjob.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
     return 0;
 }

-static void block_job_on_idle(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_on_idle_locked(Notifier *n, void *opaque)
 {
     aio_wait_kick();
 }
@@ -XXX,XX +XXX,XX @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
     }
 }

-static void block_job_event_cancelled(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
     uint64_t progress_current, progress_total;
@@ -XXX,XX +XXX,XX @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
                                         job->speed);
 }

-static void block_job_event_completed(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_completed_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
     const char *msg = NULL;
@@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(Notifier *n, void *opaque)
                                         msg);
 }

-static void block_job_event_pending(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_pending_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;

@@ -XXX,XX +XXX,XX @@ static void block_job_event_pending(Notifier *n, void *opaque)
                                       job->job.id);
 }

-static void block_job_event_ready(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_ready_locked(Notifier *n, void *opaque)
 {
     BlockJob *job = opaque;
     uint64_t progress_current, progress_total;
@@ -XXX,XX +XXX,XX @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,

     ratelimit_init(&job->limit);

-    job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
-    job->finalize_completed_notifier.notify = block_job_event_completed;
-    job->pending_notifier.notify = block_job_event_pending;
-    job->ready_notifier.notify = block_job_event_ready;
-    job->idle_notifier.notify = block_job_on_idle;
+    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
+    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
+    job->pending_notifier.notify = block_job_event_pending_locked;
+    job->ready_notifier.notify = block_job_event_ready_locked;
+    job->idle_notifier.notify = block_job_on_idle_locked;

     WITH_JOB_LOCK_GUARD() {
         notifier_list_add(&job->job.on_finalize_cancelled,
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

iostatus is the only field (together with .job) that needs
protection using the job mutex.

It is set in the main loop (GLOBAL_STATE functions) but read
in I/O code (block_job_error_action).

In order to protect it, rename block_job_iostatus_set_err
to block_job_iostatus_set_err_locked(); it is always called with
the job lock held.
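
For illustration, the read side in I/O code now takes a snapshot under
the lock; this sketch is essentially what the mirror.c hunk below does:

    BlockDeviceIoStatus iostatus;

    WITH_JOB_LOCK_GUARD() {
        iostatus = s->common.iostatus;
    }
    if (iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        /* no error pending, keep going */
    }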
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-17-eesposit@redhat.com>
[kwolf: Fixed up type of iostatus]
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 block/mirror.c | 6 +++++-
 blockjob.c     | 5 +++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/block/mirror.c b/block/mirror.c
index XXXXXXX..XXXXXXX 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
     BlockDriverState *bs = s->mirror_top_bs->backing->bs;
     BlockDriverState *target_bs = blk_bs(s->target);
     bool need_drain = true;
+    BlockDeviceIoStatus iostatus;
     int64_t length;
     int64_t target_length;
     BlockDriverInfo bdi;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
          * We do so every BLKOCK_JOB_SLICE_TIME nanoseconds, or when there is
          * an error, or when the source is clean, whichever comes first. */
         delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
+        WITH_JOB_LOCK_GUARD() {
+            iostatus = s->common.iostatus;
+        }
         if (delta < BLOCK_JOB_SLICE_TIME &&
-            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
+            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
             if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                 (cnt == 0 && s->in_flight > 0)) {
                 trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
     return block_job_query_locked(job, errp);
 }

-static void block_job_iostatus_set_err(BlockJob *job, int error)
+/* Called with job lock held */
+static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
 {
     if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
         job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
@@ -XXX,XX +XXX,XX @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                  */
                 job->job.user_paused = true;
             }
+            block_job_iostatus_set_err_locked(job, error);
         }
-        block_job_iostatus_set_err(job, error);
     }
     return action;
 }
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Some callback implementations use bdrv_* APIs that assume the
AioContext lock is held. Make sure this invariant is documented.
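
For illustration, the generic layer ends up invoking such a callback
like this (a sketch of the pattern that a later patch in this series
introduces in job.c):

    /* Drop job_mutex, take the AioContext lock around the driver callback. */
    AioContext *ctx = job->aio_context;

    job_unlock();
    aio_context_acquire(ctx);
    ret = job->driver->prepare(job);
    aio_context_release(ctx);
    job_lock();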
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-18-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/job.h | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/include/qemu/job.h b/include/qemu/job.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
     /** True if this job should automatically dismiss itself */
     bool auto_dismiss;

-    /** The completion function that will be called when the job completes.  */
+    /**
+     * The completion function that will be called when the job completes.
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
+     */
     BlockCompletionFunc *cb;

     /** The opaque value that is passed to the completion function. */
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
      *
      * This callback will not be invoked if the job has already failed.
      * If it fails, abort and then clean will be called.
+     *
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
      */
     int (*prepare)(Job *job);

@@ -XXX,XX +XXX,XX @@ struct JobDriver {
      *
      * All jobs will complete with a call to either .commit() or .abort() but
      * never both.
+     *
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
      */
     void (*commit)(Job *job);

@@ -XXX,XX +XXX,XX @@ struct JobDriver {
      *
      * All jobs will complete with a call to either .commit() or .abort() but
      * never both.
+     *
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
      */
     void (*abort)(Job *job);

@@ -XXX,XX +XXX,XX @@ struct JobDriver {
      * .commit() or .abort(). Regardless of which callback is invoked after
      * completion, .clean() will always be called, even if the job does not
      * belong to a transaction group.
+     *
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
      */
     void (*clean)(Job *job);

@@ -XXX,XX +XXX,XX @@ struct JobDriver {
      * READY).
      * (If the callback is NULL, the job is assumed to terminate
      * without I/O.)
+     *
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
      */
     bool (*cancel)(Job *job, bool force);


-    /** Called when the job is freed */
+    /**
+     * Called when the job is freed.
+     * Called with AioContext lock held, since many callback implementations
+     * use bdrv_* functions that require to hold the lock.
+     */
     void (*free)(Job *job);
 };

--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Change job_{lock/unlock} and the lock guard macros to use job_mutex.

Now that they are no longer nops, remove the AioContext locking
around them to avoid deadlocks.

Therefore:
- where possible, remove the AioContext lock/unlock pair completely
- if the lock is also used by some other function, reduce the locking
  section as much as possible, leaving the job API outside
- change AIO_WAIT_WHILE into AIO_WAIT_WHILE_UNLOCKED, since we
  are not using the AioContext lock anymore

The only functions that still need the AioContext lock are:
- the JobDriver callbacks, already documented in job.h
- job_cancel_sync() in replication.c, which is called with the
  AioContext lock taken; but the job now uses AIO_WAIT_WHILE_UNLOCKED,
  so we need to release the lock around the call

Reduce the locking section to only cover the callback invocation
and document the functions that take the AioContext lock,
to avoid taking it twice.

Also remove real_job_{lock/unlock}, as they are replaced by the
public functions.
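
As a sketch, the converted QMP handlers all end up with this shape
(this mirrors the pattern in the diff below; no per-job AioContext
acquire/release remains):

    void qmp_job_pause(const char *id, Error **errp)
    {
        Job *job;

        JOB_LOCK_GUARD(); /* now expands to QEMU_LOCK_GUARD(&job_mutex) */
        job = find_job_locked(id, errp);
        if (!job) {
            return;
        }
        job_user_pause_locked(job, errp);
    }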
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20220926093214.506243-19-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/qemu/job.h               |  17 ++---
 block/replication.c              |   2 +
 blockdev.c                       |  72 +++-----------------
 job-qmp.c                        |  46 +++----------
 job.c                            | 111 +++++++++----------------------
 qemu-img.c                       |   2 -
 tests/unit/test-bdrv-drain.c     |   4 +-
 tests/unit/test-block-iothread.c |   2 +-
 tests/unit/test-blockjob.c       |  19 +++---
 9 files changed, 72 insertions(+), 203 deletions(-)

diff --git a/include/qemu/job.h b/include/qemu/job.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
     AioContext *aio_context;


-    /** Protected by AioContext lock */
+    /** Protected by job_mutex */

     /** Reference count of the block job */
     int refcnt;
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
     /**
      * Set to false by the job while the coroutine has yielded and may be
      * re-entered by job_enter(). There may still be I/O or event loop activity
-     * pending. Accessed under block_job_mutex (in blockjob.c).
+     * pending. Accessed under job_mutex.
      *
      * When the job is deferred to the main loop, busy is true as long as the
      * bottom half is still pending.
@@ -XXX,XX +XXX,XX @@ typedef enum JobCreateFlags {

 extern QemuMutex job_mutex;

-#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */
+#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex)

-#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */
+#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex)

 /**
  * job_lock:
@@ -XXX,XX +XXX,XX @@ void job_ref_locked(Job *job);
 /**
  * Release a reference that was previously acquired with job_ref() or
  * job_create(). If it's the last reference to the object, it will be freed.
+ *
+ * Takes AioContext lock internally to invoke a job->driver callback.
  */
 void job_unref(Job *job);

@@ -XXX,XX +XXX,XX @@ void job_user_cancel_locked(Job *job, bool force, Error **errp);
 * Returns the return value from the job if the job actually completed
 * during the call, or -ECANCELED if it was canceled.
 *
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock *not* held.
 */
 int job_cancel_sync(Job *job, bool force);

@@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void);
 * function).
 *
 * Returns the return value from the job.
- *
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock *not* held.
 */
 int job_complete_sync(Job *job, Error **errp);

@@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **job, Error **errp);
 * Returns 0 if the job is successfully completed, -ECANCELED if the job was
 * cancelled before completing, and -errno in other error cases.
 *
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock *not* held.
 */
 int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp),
                     Error **errp);
diff --git a/block/replication.c b/block/replication.c
index XXXXXXX..XXXXXXX 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -XXX,XX +XXX,XX @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
          * disk, secondary disk in backup_job_completed().
          */
         if (s->backup_job) {
+            aio_context_release(aio_context);
             job_cancel_sync(&s->backup_job->job, true);
+            aio_context_acquire(aio_context);
         }

         if (!failover) {
diff --git a/blockdev.c b/blockdev.c
index XXXXXXX..XXXXXXX 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -XXX,XX +XXX,XX @@ void blockdev_mark_auto_del(BlockBackend *blk)
     for (job = block_job_next_locked(NULL); job;
          job = block_job_next_locked(job)) {
         if (block_job_has_bdrv(job, blk_bs(blk))) {
-            AioContext *aio_context = job->job.aio_context;
-            aio_context_acquire(aio_context);
-
             job_cancel_locked(&job->job, false);
-
-            aio_context_release(aio_context);
         }
     }

@@ -XXX,XX +XXX,XX @@ static void drive_backup_abort(BlkActionState *common)
     DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);

     if (state->job) {
-        AioContext *aio_context;
-
-        aio_context = bdrv_get_aio_context(state->bs);
-        aio_context_acquire(aio_context);
-
         job_cancel_sync(&state->job->job, true);
-
-        aio_context_release(aio_context);
     }
 }

@@ -XXX,XX +XXX,XX @@ static void blockdev_backup_abort(BlkActionState *common)
     BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);

     if (state->job) {
-        AioContext *aio_context;
-
-        aio_context = bdrv_get_aio_context(state->bs);
-        aio_context_acquire(aio_context);
-
         job_cancel_sync(&state->job->job, true);
-
-        aio_context_release(aio_context);
     }
 }

@@ -XXX,XX +XXX,XX @@ out:
 }

 /*
- * Get a block job using its ID and acquire its AioContext.
- * Called with job_mutex held.
+ * Get a block job using its ID. Called with job_mutex held.
  */
-static BlockJob *find_block_job_locked(const char *id,
-                                       AioContext **aio_context,
-                                       Error **errp)
+static BlockJob *find_block_job_locked(const char *id, Error **errp)
 {
     BlockJob *job;

     assert(id != NULL);

-    *aio_context = NULL;
-
     job = block_job_get_locked(id);

     if (!job) {
@@ -XXX,XX +XXX,XX @@ static BlockJob *find_block_job_locked(const char *id,
         return NULL;
     }

-    *aio_context = block_job_get_aio_context(job);
-    aio_context_acquire(*aio_context);
-
     return job;
 }

 void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *job;

     JOB_LOCK_GUARD();
-    job = find_block_job_locked(device, &aio_context, errp);
+    job = find_block_job_locked(device, errp);

     if (!job) {
         return;
     }

     block_job_set_speed_locked(job, speed, errp);
-    aio_context_release(aio_context);
 }

 void qmp_block_job_cancel(const char *device,
                           bool has_force, bool force, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *job;

     JOB_LOCK_GUARD();
-    job = find_block_job_locked(device, &aio_context, errp);
+    job = find_block_job_locked(device, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_cancel(const char *device,
     if (job_user_paused_locked(&job->job) && !force) {
         error_setg(errp, "The block job for device '%s' is currently paused",
                    device);
-        goto out;
+        return;
     }

     trace_qmp_block_job_cancel(job);
     job_user_cancel_locked(&job->job, force, errp);
-out:
-    aio_context_release(aio_context);
 }

 void qmp_block_job_pause(const char *device, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *job;

     JOB_LOCK_GUARD();
-    job = find_block_job_locked(device, &aio_context, errp);
+    job = find_block_job_locked(device, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_pause(const char *device, Error **errp)

     trace_qmp_block_job_pause(job);
     job_user_pause_locked(&job->job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_block_job_resume(const char *device, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *job;

     JOB_LOCK_GUARD();
-    job = find_block_job_locked(device, &aio_context, errp);
+    job = find_block_job_locked(device, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_resume(const char *device, Error **errp)

     trace_qmp_block_job_resume(job);
     job_user_resume_locked(&job->job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_block_job_complete(const char *device, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *job;

     JOB_LOCK_GUARD();
-    job = find_block_job_locked(device, &aio_context, errp);
+    job = find_block_job_locked(device, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_complete(const char *device, Error **errp)

     trace_qmp_block_job_complete(job);
     job_complete_locked(&job->job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_block_job_finalize(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *job;

     JOB_LOCK_GUARD();
-    job = find_block_job_locked(id, &aio_context, errp);
+    job = find_block_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_finalize(const char *id, Error **errp)
     job_ref_locked(&job->job);
     job_finalize_locked(&job->job, errp);

-    /*
-     * Job's context might have changed via job_finalize (and job_txn_apply
-     * automatically acquires the new one), so make sure we release the correct
-     * one.
-     */
-    aio_context = block_job_get_aio_context(job);
     job_unref_locked(&job->job);
-    aio_context_release(aio_context);
 }

 void qmp_block_job_dismiss(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     BlockJob *bjob;
     Job *job;

     JOB_LOCK_GUARD();
-    bjob = find_block_job_locked(id, &aio_context, errp);
+    bjob = find_block_job_locked(id, errp);

     if (!bjob) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_dismiss(const char *id, Error **errp)
     trace_qmp_block_job_dismiss(bjob);
     job = &bjob->job;
     job_dismiss_locked(&job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_change_backing_file(const char *device,
@@ -XXX,XX +XXX,XX @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
     for (job = block_job_next_locked(NULL); job;
          job = block_job_next_locked(job)) {
         BlockJobInfo *value;
-        AioContext *aio_context;

         if (block_job_is_internal(job)) {
             continue;
         }
-        aio_context = block_job_get_aio_context(job);
-        aio_context_acquire(aio_context);
         value = block_job_query_locked(job, errp);
-        aio_context_release(aio_context);
         if (!value) {
             qapi_free_BlockJobInfoList(head);
             return NULL;
diff --git a/job-qmp.c b/job-qmp.c
index XXXXXXX..XXXXXXX 100644
--- a/job-qmp.c
+++ b/job-qmp.c
@@ -XXX,XX +XXX,XX @@
 #include "trace/trace-root.h"

 /*
- * Get a job using its ID and acquire its AioContext.
- * Called with job_mutex held.
+ * Get a job using its ID. Called with job_mutex held.
  */
-static Job *find_job_locked(const char *id,
-                            AioContext **aio_context,
-                            Error **errp)
+static Job *find_job_locked(const char *id, Error **errp)
 {
     Job *job;

-    *aio_context = NULL;
-
     job = job_get_locked(id);
     if (!job) {
         error_setg(errp, "Job not found");
         return NULL;
     }

-    *aio_context = job->aio_context;
-    aio_context_acquire(*aio_context);
-
     return job;
 }

 void qmp_job_cancel(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     Job *job;

     JOB_LOCK_GUARD();
-    job = find_job_locked(id, &aio_context, errp);
+    job = find_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_job_cancel(const char *id, Error **errp)

     trace_qmp_job_cancel(job);
     job_user_cancel_locked(job, true, errp);
-    aio_context_release(aio_context);
 }

 void qmp_job_pause(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     Job *job;

     JOB_LOCK_GUARD();
-    job = find_job_locked(id, &aio_context, errp);
+    job = find_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_job_pause(const char *id, Error **errp)

     trace_qmp_job_pause(job);
     job_user_pause_locked(job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_job_resume(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     Job *job;

     JOB_LOCK_GUARD();
-    job = find_job_locked(id, &aio_context, errp);
+    job = find_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_job_resume(const char *id, Error **errp)

     trace_qmp_job_resume(job);
     job_user_resume_locked(job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_job_complete(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     Job *job;

     JOB_LOCK_GUARD();
-    job = find_job_locked(id, &aio_context, errp);
+    job = find_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_job_complete(const char *id, Error **errp)

     trace_qmp_job_complete(job);
     job_complete_locked(job, errp);
-    aio_context_release(aio_context);
 }

 void qmp_job_finalize(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     Job *job;

     JOB_LOCK_GUARD();
-    job = find_job_locked(id, &aio_context, errp);
+    job = find_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_job_finalize(const char *id, Error **errp)
     job_ref_locked(job);
     job_finalize_locked(job, errp);

-    /*
-     * Job's context might have changed via job_finalize (and job_txn_apply
-     * automatically acquires the new one), so make sure we release the correct
-     * one.
-     */
-    aio_context = job->aio_context;
     job_unref_locked(job);
-    aio_context_release(aio_context);
 }

 void qmp_job_dismiss(const char *id, Error **errp)
 {
-    AioContext *aio_context;
     Job *job;

     JOB_LOCK_GUARD();
-    job = find_job_locked(id, &aio_context, errp);
+    job = find_job_locked(id, errp);

     if (!job) {
         return;
@@ -XXX,XX +XXX,XX @@ void qmp_job_dismiss(const char *id, Error **errp)

     trace_qmp_job_dismiss(job);
     job_dismiss_locked(&job, errp);
-    aio_context_release(aio_context);
 }

 /* Called with job_mutex held. */
@@ -XXX,XX +XXX,XX @@ JobInfoList *qmp_query_jobs(Error **errp)

     for (job = job_next_locked(NULL); job; job = job_next_locked(job)) {
         JobInfo *value;
-        AioContext *aio_context;

         if (job_is_internal(job)) {
             continue;
         }
-        aio_context = job->aio_context;
-        aio_context_acquire(aio_context);
         value = job_query_single_locked(job, errp);
-        aio_context_release(aio_context);
         if (!value) {
             qapi_free_JobInfoList(head);
             return NULL;
diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@
 *
 * The second includes functions used by the job drivers and sometimes
 * by the core block layer. These delegate the locking to the callee instead.
- *
- * TODO Actually make this true
 */

 /*
@@ -XXX,XX +XXX,XX @@ struct JobTxn {
 };

 void job_lock(void)
-{
-    /* nop */
-}
-
-void job_unlock(void)
-{
-    /* nop */
-}
-
-static void real_job_lock(void)
 {
     qemu_mutex_lock(&job_mutex);
 }

-static void real_job_unlock(void)
+void job_unlock(void)
 {
     qemu_mutex_unlock(&job_mutex);
 }
@@ -XXX,XX +XXX,XX @@ static void job_txn_del_job_locked(Job *job)
 /* Called with job_mutex held, but releases it temporarily. */
 static int job_txn_apply_locked(Job *job, int fn(Job *))
 {
-    AioContext *inner_ctx;
     Job *other_job, *next;
     JobTxn *txn = job->txn;
     int rc = 0;
@@ -XXX,XX +XXX,XX @@ static int job_txn_apply_locked(Job *job, int fn(Job *))
      * break AIO_WAIT_WHILE from within fn.
      */
     job_ref_locked(job);
-    aio_context_release(job->aio_context);

     QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
-        inner_ctx = other_job->aio_context;
-        aio_context_acquire(inner_ctx);
         rc = fn(other_job);
-        aio_context_release(inner_ctx);
         if (rc) {
             break;
         }
     }

-    /*
-     * Note that job->aio_context might have been changed by calling fn, so we
-     * can't use a local variable to cache it.
-     */
-    aio_context_acquire(job->aio_context);
     job_unref_locked(job);
     return rc;
 }
@@ -XXX,XX +XXX,XX @@ void job_unref_locked(Job *job)
         assert(!job->txn);

         if (job->driver->free) {
+            AioContext *aio_context = job->aio_context;
             job_unlock();
+            /* FIXME: aiocontext lock is required because cb calls blk_unref */
+            aio_context_acquire(aio_context);
             job->driver->free(job);
+            aio_context_release(aio_context);
             job_lock();
         }

@@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job))
         return;
     }

-    real_job_lock();
     if (job->busy) {
-        real_job_unlock();
         return;
     }

     if (fn && !fn(job)) {
-        real_job_unlock();
         return;
     }

     assert(!job->deferred_to_main_loop);
     timer_del(&job->sleep_timer);
     job->busy = true;
-    real_job_unlock();
     job_unlock();
     aio_co_wake(job->co);
     job_lock();
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_do_yield_locked(Job *job, uint64_t ns)
 {
     AioContext *next_aio_context;

-    real_job_lock();
     if (ns != -1) {
         timer_mod(&job->sleep_timer, ns);
     }
     job->busy = false;
     job_event_idle_locked(job);
-    real_job_unlock();
     job_unlock();
     qemu_coroutine_yield();
     job_lock();
@@ -XXX,XX +XXX,XX @@ static void job_clean(Job *job)
     }
 }

-/* Called with job_mutex held, but releases it temporarily */
+/*
+ * Called with job_mutex held, but releases it temporarily.
+ * Takes AioContext lock internally to invoke a job->driver callback.
+ */
 static int job_finalize_single_locked(Job *job)
 {
     int job_ret;
+    AioContext *ctx = job->aio_context;

     assert(job_is_completed_locked(job));

@@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job)

     job_ret = job->ret;
     job_unlock();
+    aio_context_acquire(ctx);

     if (!job_ret) {
         job_commit(job);
@@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job)
     }
     job_clean(job);

-    job_lock();
-
     if (job->cb) {
-        job_ret = job->ret;
-        job_unlock();
         job->cb(job->opaque, job_ret);
-        job_lock();
     }

+    aio_context_release(ctx);
+    job_lock();
+
     /* Emit events only if we actually started */
     if (job_started_locked(job)) {
         if (job_is_cancelled_locked(job)) {
@@ -XXX,XX +XXX,XX @@ static int job_finalize_single_locked(Job *job)
     return 0;
 }

-/* Called with job_mutex held, but releases it temporarily */
+/*
+ * Called with job_mutex held, but releases it temporarily.
+ * Takes AioContext lock internally to invoke a job->driver callback.
+ */
 static void job_cancel_async_locked(Job *job, bool force)
 {
+    AioContext *ctx = job->aio_context;
     GLOBAL_STATE_CODE();
     if (job->driver->cancel) {
         job_unlock();
+        aio_context_acquire(ctx);
         force = job->driver->cancel(job, force);
+        aio_context_release(ctx);
         job_lock();
     } else {
         /* No .cancel() means the job will behave as if force-cancelled */
@@ -XXX,XX +XXX,XX @@ static void job_cancel_async_locked(Job *job, bool force)
     }
 }

-/* Called with job_mutex held, but releases it temporarily. */
+/*
+ * Called with job_mutex held, but releases it temporarily.
+ * Takes AioContext lock internally to invoke a job->driver callback.
+ */
 static void job_completed_txn_abort_locked(Job *job)
 {
-    AioContext *ctx;
     JobTxn *txn = job->txn;
     Job *other_job;

@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort_locked(Job *job)
     txn->aborting = true;
     job_txn_ref_locked(txn);

-    /*
-     * We can only hold the single job's AioContext lock while calling
-     * job_finalize_single() because the finalization callbacks can involve
-     * calls of AIO_WAIT_WHILE(), which could deadlock otherwise.
-     * Note that the job's AioContext may change when it is finalized.
-     */
     job_ref_locked(job);
-    aio_context_release(job->aio_context);

     /* Other jobs are effectively cancelled by us, set the status for
      * them; this job, however, may or may not be cancelled, depending
      * on the caller, so leave it. */
     QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
         if (other_job != job) {
-            ctx = other_job->aio_context;
-            aio_context_acquire(ctx);
             /*
              * This is a transaction: If one job failed, no result will matter.
              * Therefore, pass force=true to terminate all other jobs as quickly
              * as possible.
              */
             job_cancel_async_locked(other_job, true);
-            aio_context_release(ctx);
         }
     }
     while (!QLIST_EMPTY(&txn->jobs)) {
         other_job = QLIST_FIRST(&txn->jobs);
-        /*
-         * The job's AioContext may change, so store it in @ctx so we
-         * release the same context that we have acquired before.
-         */
-        ctx = other_job->aio_context;
-        aio_context_acquire(ctx);
         if (!job_is_completed_locked(other_job)) {
             assert(job_cancel_requested_locked(other_job));
             job_finish_sync_locked(other_job, NULL, NULL);
         }
         job_finalize_single_locked(other_job);
-        aio_context_release(ctx);
     }

-    /*
-     * Use job_ref()/job_unref() so we can read the AioContext here
-     * even if the job went away during job_finalize_single().
-     */
-    aio_context_acquire(job->aio_context);
     job_unref_locked(job);
-
     job_txn_unref_locked(txn);
 }

@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_abort_locked(Job *job)
 static int job_prepare_locked(Job *job)
 {
     int ret;
+    AioContext *ctx = job->aio_context;

     GLOBAL_STATE_CODE();
+
     if (job->ret == 0 && job->driver->prepare) {
         job_unlock();
+        aio_context_acquire(ctx);
         ret = job->driver->prepare(job);
+        aio_context_release(ctx);
         job_lock();
         job->ret = ret;
         job_update_rc_locked(job);
     }
+
     return job->ret;
 }

@@ -XXX,XX +XXX,XX @@ static void job_completed_locked(Job *job)
 static void job_exit(void *opaque)
 {
     Job *job = (Job *)opaque;
-    AioContext *ctx;
     JOB_LOCK_GUARD();
-
     job_ref_locked(job);
-    aio_context_acquire(job->aio_context);

     /* This is a lie, we're not quiescent, but still doing the completion
      * callbacks. However, completion callbacks tend to involve operations that
@@ -XXX,XX +XXX,XX @@ static void job_exit(void *opaque)
     job_event_idle_locked(job);

     job_completed_locked(job);
-
-    /*
-     * Note that calling job_completed can move the job to a different
-     * aio_context, so we cannot cache from above. job_txn_apply takes care of
-     * acquiring the new lock, and we ref/unref to avoid job_completed freeing
-     * the job underneath us.
-     */
-    ctx = job->aio_context;
     job_unref_locked(job);
-    aio_context_release(ctx);
 }

 /**
@@ -XXX,XX +XXX,XX @@ int job_cancel_sync(Job *job, bool force)
 void job_cancel_sync_all(void)
 {
     Job *job;
-    AioContext *aio_context;
     JOB_LOCK_GUARD();

     while ((job = job_next_locked(NULL))) {
-        aio_context = job->aio_context;
-        aio_context_acquire(aio_context);
         job_cancel_sync_locked(job, true);
-        aio_context_release(aio_context);
     }
 }

@@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job,
     }

     job_unlock();
-    AIO_WAIT_WHILE(job->aio_context,
-                   (job_enter(job), !job_is_completed(job)));
+    AIO_WAIT_WHILE_UNLOCKED(job->aio_context,
+                            (job_enter(job), !job_is_completed(job)));
     job_lock();

     ret = (job_is_cancelled_locked(job) && job->ret == 0)
diff --git a/qemu-img.c b/qemu-img.c
index XXXXXXX..XXXXXXX 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp)
     AioContext *aio_context = block_job_get_aio_context(job);
     int ret = 0;

-    aio_context_acquire(aio_context);
     job_lock();
     job_ref_locked(&job->job);
     do {
@@ -XXX,XX +XXX,XX @@ static void run_block_job(BlockJob *job, Error **errp)
     }
     job_unref_locked(&job->job);
     job_unlock();
-    aio_context_release(aio_context);

     /* publish completion progress only when success */
     if (!ret) {
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         tjob->prepare_ret = -EIO;
         break;
     }
+    aio_context_release(ctx);

     job_start(&job->job);
-    aio_context_release(ctx);

     if (use_iothread) {
         /* job_co_entry() is run in the I/O thread, wait for the actual job
@@ -XXX,XX +XXX,XX @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
     }

-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         ret = job_complete_sync_locked(&job->job, &error_abort);
     }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

+    aio_context_acquire(ctx);
     if (use_iothread) {
         blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
         assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -XXX,XX +XXX,XX @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }

-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         job_complete_sync_locked(&tjob->common.job, &error_abort);
     }
+    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);

diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s)
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
     JobStatus sts = job->job.status;
-    AioContext *ctx;
-
-    ctx = job->job.aio_context;
-    aio_context_acquire(ctx);
+    AioContext *ctx = job->job.aio_context;

     job_cancel_sync(&job->job, true);
     WITH_JOB_LOCK_GUARD() {
@@ -XXX,XX +XXX,XX @@ static void cancel_common(CancelJob *s)
         assert(job->job.status == JOB_STATUS_NULL);
         job_unref_locked(&job->job);
     }
-    destroy_blk(blk);

+    aio_context_acquire(ctx);
+    destroy_blk(blk);
     aio_context_release(ctx);
+
 }

 static void test_cancel_created(void)
@@ -XXX,XX +XXX,XX @@ static void test_cancel_concluded(void)
     aio_poll(qemu_get_aio_context(), true);
     assert_job_status_is(job, JOB_STATUS_PENDING);

-    aio_context_acquire(job->aio_context);
     WITH_JOB_LOCK_GUARD() {
         job_finalize_locked(job, &error_abort);
+        assert(job->status == JOB_STATUS_CONCLUDED);
     }
-    aio_context_release(job->aio_context);
-    assert_job_status_is(job, JOB_STATUS_CONCLUDED);

     cancel_common(s);
 }
@@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void)

     /* Wait for the job to become READY */
     job_start(job);
-    aio_context_acquire(ctx);
     /*
      * Here we are waiting for the status to change, so don't bother
      * protecting the read every time.
      */
-    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
-    aio_context_release(ctx);
+    AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);

     /* Begin the drained section, pausing the job */
     bdrv_drain_all_begin();
@@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void)
     aio_context_acquire(ctx);
     /* This will schedule the job to resume it */
     bdrv_drain_all_end();
+    aio_context_release(ctx);

     WITH_JOB_LOCK_GUARD() {
         /* But the job cannot run, so it will remain on standby */
@@ -XXX,XX +XXX,XX @@ static void test_complete_in_standby(void)
         job_dismiss_locked(&job, &error_abort);
     }

+    aio_context_acquire(ctx);
     destroy_blk(blk);
     aio_context_release(ctx);
     iothread_join(iothread);
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

Not sure what the atomic here was supposed to do, since job.busy
is protected by the job lock. Since the whole function
is called under job_mutex, just remove the atomic.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220926093214.506243-20-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 blockjob.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
     info = g_new0(BlockJobInfo, 1);
     info->type      = g_strdup(job_type_str(&job->job));
     info->device    = g_strdup(job->job.id);
-    info->busy      = qatomic_read(&job->job.busy);
+    info->busy      = job->job.busy;
     info->paused    = job->job.pause_count > 0;
     info->offset    = progress_current;
     info->len       = progress_total;
--
2.37.3
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

These public functions are not used anywhere, thus can be dropped.
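
For external callers, only the _locked variants remain; a sketch of
the resulting usage (matching the qmp_cont() hunk earlier in this
series):

    WITH_JOB_LOCK_GUARD() {
        for (job = block_job_next_locked(NULL); job;
             job = block_job_next_locked(job)) {
            block_job_iostatus_reset_locked(job);
        }
    }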
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
8
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
9
Message-Id: <20220926093214.506243-21-eesposit@redhat.com>
10
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
11
---
12
include/block/blockjob.h | 31 ++++++++++++-------------------
13
blockjob.c | 16 ++--------------
14
2 files changed, 14 insertions(+), 33 deletions(-)
15
16
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/include/block/blockjob.h
19
+++ b/include/block/blockjob.h
20
@@ -XXX,XX +XXX,XX @@ typedef struct BlockJob {
21
*/
22
23
/**
24
- * block_job_next:
25
+ * block_job_next_locked:
26
* @job: A block job, or %NULL.
27
*
28
* Get the next element from the list of block jobs after @job, or the
29
* first one if @job is %NULL.
30
*
31
* Returns the requested job, or %NULL if there are no more jobs left.
32
+ * Called with job lock held.
33
*/
34
-BlockJob *block_job_next(BlockJob *job);
35
-
36
-/* Same as block_job_next(), but called with job lock held. */
37
BlockJob *block_job_next_locked(BlockJob *job);
38
39
/**
40
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next_locked(BlockJob *job);
41
* Get the block job identified by @id (which must not be %NULL).
42
*
43
* Returns the requested job, or %NULL if it doesn't exist.
44
+ * Called with job lock *not* held.
45
*/
46
BlockJob *block_job_get(const char *id);
47
48
@@ -XXX,XX +XXX,XX @@ void block_job_remove_all_bdrv(BlockJob *job);
49
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs);
50
51
/**
52
- * block_job_set_speed:
53
+ * block_job_set_speed_locked:
54
* @job: The job to set the speed for.
55
* @speed: The new value
56
* @errp: Error object.
57
*
58
* Set a rate-limiting parameter for the job; the actual meaning may
59
* vary depending on the job type.
60
- */
61
-bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
62
-
63
-/*
64
- * Same as block_job_set_speed(), but called with job lock held.
65
- * Might release the lock temporarily.
66
+ *
67
+ * Called with job lock held, but might release it temporarily.
68
*/
69
bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp);
70
71
/**
72
- * block_job_query:
73
+ * block_job_query_locked:
74
* @job: The job to get information about.
75
*
76
* Return information about a job.
77
+ *
78
+ * Called with job lock held.
79
*/
80
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
81
-
82
-/* Same as block_job_query(), but called with job lock held. */
83
BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp);
84
85
/**
86
- * block_job_iostatus_reset:
87
+ * block_job_iostatus_reset_locked:
88
* @job: The job whose I/O status should be reset.
89
*
90
* Reset I/O status on @job and on BlockDriverState objects it uses,
91
* other than job->blk.
92
+ *
93
+ * Called with job lock held.
94
*/
95
-void block_job_iostatus_reset(BlockJob *job);
96
-
97
-/* Same as block_job_iostatus_reset(), but called with job lock held. */
98
void block_job_iostatus_reset_locked(BlockJob *job);
99
100
/*
101
diff --git a/blockjob.c b/blockjob.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/blockjob.c
104
+++ b/blockjob.c
105
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_next_locked(BlockJob *bjob)
106
return job ? container_of(job, BlockJob, job) : NULL;
107
}
108
109
-BlockJob *block_job_next(BlockJob *bjob)
110
-{
111
- JOB_LOCK_GUARD();
112
- return block_job_next_locked(bjob);
113
-}
114
-
115
BlockJob *block_job_get_locked(const char *id)
116
{
117
Job *job = job_get_locked(id);
118
@@ -XXX,XX +XXX,XX @@ bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
119
return true;
120
}
121
122
-bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
123
+static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
124
{
125
JOB_LOCK_GUARD();
126
return block_job_set_speed_locked(job, speed, errp);
127
@@ -XXX,XX +XXX,XX @@ BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
128
return info;
129
}
130
131
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
132
-{
133
- JOB_LOCK_GUARD();
134
- return block_job_query_locked(job, errp);
135
-}
136
-
137
/* Called with job lock held */
138
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
139
{
140
@@ -XXX,XX +XXX,XX @@ void block_job_iostatus_reset_locked(BlockJob *job)
141
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
142
}
143
144
-void block_job_iostatus_reset(BlockJob *job)
145
+static void block_job_iostatus_reset(BlockJob *job)
146
{
147
JOB_LOCK_GUARD();
148
block_job_iostatus_reset_locked(job);
149
--
150
2.37.3
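
Aside: the caller-side effect of dropping these wrappers is that code outside blockjob.c now takes the job lock itself and iterates with the _locked variants. The sketch below is modeled on the shape qmp_query_block_jobs() takes after this series; the function name is illustrative and the error path is simplified.

    /* Collect a BlockJobInfo for every block job, holding job_mutex
     * across both the iteration and each query. */
    static BlockJobInfoList *example_collect_block_job_info(Error **errp)
    {
        BlockJobInfoList *head = NULL, **tail = &head;
        BlockJob *job;

        JOB_LOCK_GUARD();
        for (job = block_job_next_locked(NULL); job;
             job = block_job_next_locked(job)) {
            BlockJobInfo *value = block_job_query_locked(job, errp);
            if (!value) {
                qapi_free_BlockJobInfoList(head);
                return NULL;
            }
            QAPI_LIST_APPEND(tail, value);
        }
        return head;
    }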
Deleted patch
From: Emanuele Giuseppe Esposito <eesposit@redhat.com>

These public functions are not used anywhere, thus can be dropped.
Also, since this patch completes the conversion of the job API from
the AioContext lock to job_lock, adjust all remaining function
documentation to state clearly whether the job lock is taken or not.

Also document the locking requirements for the few functions whose
unlocked version is not removed.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220926093214.506243-22-eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
include/qemu/job.h | 110 +++++++++++++------------------
job.c | 107 ++----------------------------------
tests/unit/test-blockjob.c | 4 +-
3 files changed, 46 insertions(+), 175 deletions(-)

diff --git a/include/qemu/job.h b/include/qemu/job.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -XXX,XX +XXX,XX @@ JobTxn *job_txn_new(void);
/**
* Release a reference that was previously acquired with job_txn_add_job or
* job_txn_new. If it's the last reference to the object, it will be freed.
+ *
+ * Called with job lock *not* held.
*/
void job_txn_unref(JobTxn *txn);

@@ -XXX,XX +XXX,XX @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
/**
* Add a reference to Job refcnt, it will be decreased with job_unref, and then
* be freed if it comes to be the last reference.
+ *
+ * Called with job lock held.
*/
-void job_ref(Job *job);
-
-/* Same as job_ref(), but called with job lock held. */
void job_ref_locked(Job *job);

/**
- * Release a reference that was previously acquired with job_ref() or
+ * Release a reference that was previously acquired with job_ref_locked() or
* job_create(). If it's the last reference to the object, it will be freed.
*
* Takes AioContext lock internally to invoke a job->driver callback.
+ * Called with job lock held.
*/
-void job_unref(Job *job);
-
-/* Same as job_unref(), but called with job lock held. */
void job_unref_locked(Job *job);

/**
@@ -XXX,XX +XXX,XX @@ void job_progress_increase_remaining(Job *job, uint64_t delta);
* Conditionally enter the job coroutine if the job is ready to run, not
* already busy and fn() returns true. fn() is called while under the job_lock
* critical section.
- */
-void job_enter_cond(Job *job, bool(*fn)(Job *job));
-
-/*
- * Same as job_enter_cond(), but called with job lock held.
- * Might release the lock temporarily.
+ *
+ * Called with job lock held, but might release it temporarily.
*/
void job_enter_cond_locked(Job *job, bool(*fn)(Job *job));

@@ -XXX,XX +XXX,XX @@ bool job_cancel_requested(Job *job);

/**
* Returns whether the job is in a completed state.
- * Called with job_mutex *not* held.
+ * Called with job lock held.
*/
-bool job_is_completed(Job *job);
-
-/* Same as job_is_completed(), but called with job lock held. */
bool job_is_completed_locked(Job *job);

/**
@@ -XXX,XX +XXX,XX @@ bool job_is_ready_locked(Job *job);
/**
* Request @job to pause at the next pause point. Must be paired with
* job_resume(). If the job is supposed to be resumed by user action, call
- * job_user_pause() instead.
+ * job_user_pause_locked() instead.
+ *
+ * Called with job lock *not* held.
*/
void job_pause(Job *job);

/* Same as job_pause(), but called with job lock held. */
void job_pause_locked(Job *job);

-/** Resumes a @job paused with job_pause. */
+/** Resumes a @job paused with job_pause. Called with job lock *not* held. */
void job_resume(Job *job);

/*
@@ -XXX,XX +XXX,XX @@ void job_resume_locked(Job *job);
/**
* Asynchronously pause the specified @job.
* Do not allow a resume until a matching call to job_user_resume.
+ * Called with job lock held.
*/
-void job_user_pause(Job *job, Error **errp);
-
-/* Same as job_user_pause(), but called with job lock held. */
void job_user_pause_locked(Job *job, Error **errp);

-/** Returns true if the job is user-paused. */
-bool job_user_paused(Job *job);
-
-/* Same as job_user_paused(), but called with job lock held. */
+/**
+ * Returns true if the job is user-paused.
+ * Called with job lock held.
+ */
bool job_user_paused_locked(Job *job);

/**
* Resume the specified @job.
- * Must be paired with a preceding job_user_pause.
- */
-void job_user_resume(Job *job, Error **errp);
-
-/*
- * Same as job_user_resume(), but called with job lock held.
- * Might release the lock temporarily.
+ * Must be paired with a preceding job_user_pause_locked.
+ * Called with job lock held, but might release it temporarily.
*/
void job_user_resume_locked(Job *job, Error **errp);

@@ -XXX,XX +XXX,XX @@ void job_user_resume_locked(Job *job, Error **errp);
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock *not* held.
*/
Job *job_next(Job *job);

@@ -XXX,XX +XXX,XX @@ Job *job_next_locked(Job *job);
* Get the job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock held.
*/
-Job *job_get(const char *id);
-
-/* Same as job_get(), but called with job lock held. */
Job *job_get_locked(const char *id);

/**
* Check whether the verb @verb can be applied to @job in its current state.
* Returns 0 if the verb can be applied; otherwise errp is set and -EPERM
* returned.
+ *
+ * Called with job lock held.
*/
-int job_apply_verb(Job *job, JobVerb verb, Error **errp);
-
-/* Same as job_apply_verb, but called with job lock held. */
int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp);

/**
@@ -XXX,XX +XXX,XX @@ void job_early_fail(Job *job);
*/
void job_transition_to_ready(Job *job);

-/** Asynchronously complete the specified @job. */
-void job_complete(Job *job, Error **errp);
-
-/*
- * Same as job_complete(), but called with job lock held.
- * Might release the lock temporarily.
+/**
+ * Asynchronously complete the specified @job.
+ * Called with job lock held, but might release it temporarily.
*/
void job_complete_locked(Job *job, Error **errp);

/**
* Asynchronously cancel the specified @job. If @force is true, the job should
* be cancelled immediately without waiting for a consistent state.
+ * Called with job lock held.
*/
-void job_cancel(Job *job, bool force);
-
-/* Same as job_cancel(), but called with job lock held. */
void job_cancel_locked(Job *job, bool force);

/**
- * Cancels the specified job like job_cancel(), but may refuse to do so if the
- * operation isn't meaningful in the current state of the job.
+ * Cancels the specified job like job_cancel_locked(), but may refuse
+ * to do so if the operation isn't meaningful in the current state of the job.
+ * Called with job lock held.
*/
-void job_user_cancel(Job *job, bool force, Error **errp);
-
-/* Same as job_user_cancel(), but called with job lock held. */
void job_user_cancel_locked(Job *job, bool force, Error **errp);

/**
@@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void);

/**
* @job: The job to be completed.
- * @errp: Error object which may be set by job_complete(); this is not
+ * @errp: Error object which may be set by job_complete_locked(); this is not
* necessarily set on every error, the job return value has to be
* checked as well.
*
@@ -XXX,XX +XXX,XX @@ void job_cancel_sync_all(void);
* function).
*
* Returns the return value from the job.
- * Called with job_lock *not* held.
+ * Called with job_lock held.
*/
-int job_complete_sync(Job *job, Error **errp);
-
-/* Same as job_complete_sync, but called with job lock held. */
int job_complete_sync_locked(Job *job, Error **errp);

/**
@@ -XXX,XX +XXX,XX @@ int job_complete_sync_locked(Job *job, Error **errp);
* FIXME: Make the below statement universally true:
* For jobs that support the manual workflow mode, all graph changes that occur
* as a result will occur after this command and before a successful reply.
+ *
+ * Called with job lock held.
*/
-void job_finalize(Job *job, Error **errp);
-
-/* Same as job_finalize(), but called with job lock held. */
void job_finalize_locked(Job *job, Error **errp);

/**
* Remove the concluded @job from the query list and resets the passed pointer
* to %NULL. Returns an error if the job is not actually concluded.
+ *
+ * Called with job lock held.
*/
-void job_dismiss(Job **job, Error **errp);
-
-/* Same as job_dismiss(), but called with job lock held. */
void job_dismiss_locked(Job **job, Error **errp);

/**
@@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **job, Error **errp);
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
* cancelled before completing, and -errno in other error cases.
*
- * Called with job_lock *not* held.
- */
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp),
- Error **errp);
-
-/*
- * Same as job_finish_sync(), but called with job lock held.
- * Might release the lock temporarily.
+ * Called with job_lock held, but might release it temporarily.
*/
int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
Error **errp);
diff --git a/job.c b/job.c
index XXXXXXX..XXXXXXX 100644
--- a/job.c
+++ b/job.c
@@ -XXX,XX +XXX,XX @@ int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp)
return -EPERM;
}

-int job_apply_verb(Job *job, JobVerb verb, Error **errp)
-{
- JOB_LOCK_GUARD();
- return job_apply_verb_locked(job, verb, errp);
-}
-
JobType job_type(const Job *job)
{
return job->driver->job_type;
@@ -XXX,XX +XXX,XX @@ bool job_is_completed_locked(Job *job)
return false;
}

-bool job_is_completed(Job *job)
+static bool job_is_completed(Job *job)
{
JOB_LOCK_GUARD();
return job_is_completed_locked(job);
@@ -XXX,XX +XXX,XX @@ Job *job_get_locked(const char *id)
return NULL;
}

-Job *job_get(const char *id)
-{
- JOB_LOCK_GUARD();
- return job_get_locked(id);
-}
-
void job_set_aio_context(Job *job, AioContext *ctx)
{
/* protect against read in job_finish_sync_locked and job_start */
@@ -XXX,XX +XXX,XX @@ void job_ref_locked(Job *job)
++job->refcnt;
}

-void job_ref(Job *job)
-{
- JOB_LOCK_GUARD();
- job_ref_locked(job);
-}
-
void job_unref_locked(Job *job)
{
GLOBAL_STATE_CODE();
@@ -XXX,XX +XXX,XX @@ void job_unref_locked(Job *job)
}
}

-void job_unref(Job *job)
-{
- JOB_LOCK_GUARD();
- job_unref_locked(job);
-}
-
void job_progress_update(Job *job, uint64_t done)
{
progress_work_done(&job->progress, done);
@@ -XXX,XX +XXX,XX @@ void job_enter_cond_locked(Job *job, bool(*fn)(Job *job))
job_lock();
}

-void job_enter_cond(Job *job, bool(*fn)(Job *job))
-{
- JOB_LOCK_GUARD();
- job_enter_cond_locked(job, fn);
-}
-
void job_enter(Job *job)
{
JOB_LOCK_GUARD();
@@ -XXX,XX +XXX,XX @@ void coroutine_fn job_pause_point(Job *job)
job_pause_point_locked(job);
}

-static void coroutine_fn job_yield_locked(Job *job)
+void coroutine_fn job_yield(Job *job)
{
+ JOB_LOCK_GUARD();
assert(job->busy);

/* Check cancellation *before* setting busy = false, too! */
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_yield_locked(Job *job)
job_pause_point_locked(job);
}

-void coroutine_fn job_yield(Job *job)
-{
- JOB_LOCK_GUARD();
- job_yield_locked(job);
-}
-
void coroutine_fn job_sleep_ns(Job *job, int64_t ns)
{
JOB_LOCK_GUARD();
@@ -XXX,XX +XXX,XX @@ void job_user_pause_locked(Job *job, Error **errp)
job_pause_locked(job);
}

-void job_user_pause(Job *job, Error **errp)
-{
- JOB_LOCK_GUARD();
- job_user_pause_locked(job, errp);
-}
-
bool job_user_paused_locked(Job *job)
{
return job->user_paused;
}

-bool job_user_paused(Job *job)
-{
- JOB_LOCK_GUARD();
- return job_user_paused_locked(job);
-}
-
void job_user_resume_locked(Job *job, Error **errp)
{
assert(job);
@@ -XXX,XX +XXX,XX @@ void job_user_resume_locked(Job *job, Error **errp)
job_resume_locked(job);
}

-void job_user_resume(Job *job, Error **errp)
-{
- JOB_LOCK_GUARD();
- job_user_resume_locked(job, errp);
-}
-
/* Called with job_mutex held, but releases it temporarily. */
static void job_do_dismiss_locked(Job *job)
{
@@ -XXX,XX +XXX,XX @@ void job_dismiss_locked(Job **jobptr, Error **errp)
*jobptr = NULL;
}

-void job_dismiss(Job **jobptr, Error **errp)
-{
- JOB_LOCK_GUARD();
- job_dismiss_locked(jobptr, errp);
-}
-
void job_early_fail(Job *job)
{
JOB_LOCK_GUARD();
@@ -XXX,XX +XXX,XX @@ void job_finalize_locked(Job *job, Error **errp)
job_do_finalize_locked(job);
}

-void job_finalize(Job *job, Error **errp)
-{
- JOB_LOCK_GUARD();
- job_finalize_locked(job, errp);
-}
-
/* Called with job_mutex held. */
static int job_transition_to_pending_locked(Job *job)
{
@@ -XXX,XX +XXX,XX @@ void job_cancel_locked(Job *job, bool force)
}
}

-void job_cancel(Job *job, bool force)
-{
- JOB_LOCK_GUARD();
- job_cancel_locked(job, force);
-}
-
void job_user_cancel_locked(Job *job, bool force, Error **errp)
{
if (job_apply_verb_locked(job, JOB_VERB_CANCEL, errp)) {
@@ -XXX,XX +XXX,XX @@ void job_user_cancel_locked(Job *job, bool force, Error **errp)
job_cancel_locked(job, force);
}

-void job_user_cancel(Job *job, bool force, Error **errp)
-{
- JOB_LOCK_GUARD();
- job_user_cancel_locked(job, force, errp);
-}
-
-/* A wrapper around job_cancel() taking an Error ** parameter so it may be
- * used with job_finish_sync() without the need for (rather nasty) function
- * pointer casts there.
+/* A wrapper around job_cancel_locked() taking an Error ** parameter so it may
+ * be used with job_finish_sync_locked() without the need for (rather nasty)
+ * function pointer casts there.
*
* Called with job_mutex held.
*/
@@ -XXX,XX +XXX,XX @@ int job_complete_sync_locked(Job *job, Error **errp)
return job_finish_sync_locked(job, job_complete_locked, errp);
}

-int job_complete_sync(Job *job, Error **errp)
-{
- JOB_LOCK_GUARD();
- return job_complete_sync_locked(job, errp);
-}
-
void job_complete_locked(Job *job, Error **errp)
{
/* Should not be reachable via external interface for internal jobs */
@@ -XXX,XX +XXX,XX @@ void job_complete_locked(Job *job, Error **errp)
job_lock();
}

-void job_complete(Job *job, Error **errp)
-{
- JOB_LOCK_GUARD();
- job_complete_locked(job, errp);
-}
-
int job_finish_sync_locked(Job *job,
void (*finish)(Job *, Error **errp),
Error **errp)
@@ -XXX,XX +XXX,XX @@ int job_finish_sync_locked(Job *job,
job_unref_locked(job);
return ret;
}
-
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
-{
- JOB_LOCK_GUARD();
- return job_finish_sync_locked(job, finish, errp);
-}
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_yielding_driver = {
};

/*
- * Test that job_complete() works even on jobs that are in a paused
+ * Test that job_complete_locked() works even on jobs that are in a paused
* state (i.e., STANDBY).
*
* To do this, run YieldingJob in an IO thread, get it into the READY
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_yielding_driver = {
* acquire the context so the job will not be entered and will thus
* remain on STANDBY.
*
- * job_complete() should still work without error.
+ * job_complete_locked() should still work without error.
*
* Note that on the QMP interface, it is impossible to lock an IO
* thread before a drained section ends. In practice, the
--
2.37.3
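
Aside: the same convention now applies to the generic job API. With job_complete_sync() and the other unlocked wrappers gone, external callers open an explicit critical section and call the _locked variants, taking a reference when the job has to stay alive across a call that may drop the lock. A sketch under those assumptions, with an illustrative function name:

    /* Synchronously complete @job while holding job_mutex. */
    static int example_complete_job_sync(Job *job, Error **errp)
    {
        int ret;

        WITH_JOB_LOCK_GUARD() {
            job_ref_locked(job);    /* keep @job alive across completion */
            ret = job_complete_sync_locked(job, errp);  /* may drop the lock */
            job_unref_locked(job);
        }
        return ret;
    }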
Deleted patch
The field is unused (only ever set, but never read) since commit
ac9185603. Additionally, the commit message of commit 34fa110e had
already explained why it is unreliable. Remove it.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20220923142838.91043-1-kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
block/file-posix.c | 9 ---------
1 file changed, 9 deletions(-)

diff --git a/block/file-posix.c b/block/file-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState {

bool has_discard:1;
bool has_write_zeroes:1;
- bool discard_zeroes:1;
bool use_linux_aio:1;
bool use_linux_io_uring:1;
int page_cache_inconsistent; /* errno from fdatasync failure */
@@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
ret = -EINVAL;
goto fail;
} else {
- s->discard_zeroes = true;
s->has_fallocate = true;
}
} else {
@@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
}

if (S_ISBLK(st.st_mode)) {
-#ifdef BLKDISCARDZEROES
- unsigned int arg;
- if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) {
- s->discard_zeroes = true;
- }
-#endif
#ifdef __linux__
/* On Linux 3.10, BLKDISCARD leaves stale data in the page cache. Do
* not rely on the contents of discarded blocks unless using O_DIRECT.
* Same for BLKZEROOUT.
*/
if (!(bs->open_flags & BDRV_O_NOCACHE)) {
- s->discard_zeroes = false;
s->has_write_zeroes = false;
}
#endif
--
2.37.3
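
Aside: for readers who want the history condensed, the snippet below is reassembled from the hunks removed above, not new code. Before this patch, raw_open_common() probed the kernel for discard-zeroes behaviour on block devices and then had to invalidate the answer again for cached opens, which is a large part of why the flag never carried reliable information:

    #ifdef BLKDISCARDZEROES
        unsigned int arg;
        /* Ask the kernel whether discarded blocks read back as zeroes. */
        if (ioctl(s->fd, BLKDISCARDZEROES, &arg) == 0 && arg) {
            s->discard_zeroes = true;
        }
    #endif
        /* Without O_DIRECT the page cache may still hold stale data,
         * so the probe result was discarded again: */
        if (!(bs->open_flags & BDRV_O_NOCACHE)) {
            s->discard_zeroes = false;
        }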