1
The following changes since commit 9964e96dc9999cf7f7c936ee854a795415d19b60:
1
The following changes since commit 88afdc92b644120e0182c8567e1b1d236e471b12:
2
2
3
Merge remote-tracking branch 'jasowang/tags/net-pull-request' into staging (2017-05-23 15:01:31 +0100)
3
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging (2021-09-05 15:48:42 +0100)
4
4
5
are available in the git repository at:
5
are available in the Git repository at:
6
6
7
git://github.com/codyprime/qemu-kvm-jtc.git tags/block-pull-request
7
https://gitlab.com/stefanha/qemu.git tags/block-pull-request
8
8
9
for you to fetch changes up to 223a23c198787328ae75bc65d84edf5fde33c0b6:
9
for you to fetch changes up to 9bd2788f49c331b02372cc257b11e4c984d39708:
10
10
11
block/gluster: glfs_lseek() workaround (2017-05-24 16:44:46 -0400)
11
block/nvme: Only report VFIO error on failed retry (2021-09-07 09:08:24 +0100)
12
12
13
----------------------------------------------------------------
13
----------------------------------------------------------------
14
Block patches
14
Pull request
15
16
Userspace NVMe driver patches.
17
15
----------------------------------------------------------------
18
----------------------------------------------------------------
16
19
17
Jeff Cody (1):
20
Philippe Mathieu-Daudé (11):
18
block/gluster: glfs_lseek() workaround
21
block/nvme: Use safer trace format string
22
util/vfio-helpers: Let qemu_vfio_verify_mappings() use error_report()
23
util/vfio-helpers: Replace qemu_mutex_lock() calls with
24
QEMU_LOCK_GUARD
25
util/vfio-helpers: Remove unreachable code in qemu_vfio_dma_map()
26
block/nvme: Have nvme_create_queue_pair() report errors consistently
27
util/vfio-helpers: Pass Error handle to qemu_vfio_dma_map()
28
util/vfio-helpers: Extract qemu_vfio_water_mark_reached()
29
util/vfio-helpers: Use error_setg in qemu_vfio_find_[fixed/temp]_iova
30
util/vfio-helpers: Simplify qemu_vfio_dma_map() returning directly
31
util/vfio-helpers: Let qemu_vfio_do_mapping() propagate Error
32
block/nvme: Only report VFIO error on failed retry
19
33
20
Paolo Bonzini (11):
34
include/qemu/vfio-helpers.h | 2 +-
21
blockjob: remove unnecessary check
35
block/nvme.c | 29 +++++++----
22
blockjob: remove iostatus_reset callback
36
util/vfio-helpers.c | 99 ++++++++++++++++++++-----------------
23
blockjob: introduce block_job_early_fail
37
block/trace-events | 2 +-
24
blockjob: introduce block_job_pause/resume_all
38
4 files changed, 76 insertions(+), 56 deletions(-)
25
blockjob: separate monitor and blockjob APIs
26
blockjob: move iostatus reset inside block_job_user_resume
27
blockjob: introduce block_job_cancel_async, check iostatus invariants
28
blockjob: group BlockJob transaction functions together
29
blockjob: strengthen a bit test-blockjob-txn
30
blockjob: reorganize block_job_completed_txn_abort
31
blockjob: use deferred_to_main_loop to indicate the coroutine has
32
ended
33
34
block/backup.c | 2 +-
35
block/commit.c | 2 +-
36
block/gluster.c | 18 +-
37
block/io.c | 19 +-
38
block/mirror.c | 2 +-
39
blockdev.c | 1 -
40
blockjob.c | 750 ++++++++++++++++++++++++-------------------
41
include/block/blockjob.h | 16 -
42
include/block/blockjob_int.h | 27 +-
43
tests/test-blockjob-txn.c | 7 +-
44
tests/test-blockjob.c | 10 +-
45
11 files changed, 463 insertions(+), 391 deletions(-)
46
39
47
--
40
--
48
2.9.3
41
2.31.1
49
42
50
43
44
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
!job is always checked prior to the call, drop it from here.
3
Fix when building with -Wshorten-64-to-32:
4
4
5
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
5
warning: implicit conversion loses integer precision: 'unsigned long' to 'int' [-Wshorten-64-to-32]
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
6
7
Reviewed-by: Jeff Cody <jcody@redhat.com>
7
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
8
Message-id: 20170508141310.8674-2-pbonzini@redhat.com
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Signed-off-by: Jeff Cody <jcody@redhat.com>
9
Message-id: 20210902070025.197072-2-philmd@redhat.com
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
11
---
11
blockjob.c | 2 +-
12
block/trace-events | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
13
1 file changed, 1 insertion(+), 1 deletion(-)
13
14
14
diff --git a/blockjob.c b/blockjob.c
15
diff --git a/block/trace-events b/block/trace-events
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/blockjob.c
17
--- a/block/trace-events
17
+++ b/blockjob.c
18
+++ b/block/trace-events
18
@@ -XXX,XX +XXX,XX @@ static bool block_job_should_pause(BlockJob *job)
19
@@ -XXX,XX +XXX,XX @@ nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset 0x%"PRIx64" byte
19
20
nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
20
bool block_job_user_paused(BlockJob *job)
21
nvme_dma_map_flush(void *s) "s %p"
21
{
22
nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u"
22
- return job ? job->user_paused : 0;
23
-nvme_create_queue_pair(unsigned q_index, void *q, unsigned size, void *aio_context, int fd) "index %u q %p size %u aioctx %p fd %d"
23
+ return job->user_paused;
24
+nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u q %p size %zu aioctx %p fd %d"
24
}
25
nvme_free_queue_pair(unsigned q_index, void *q) "index %u q %p"
25
26
nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
26
void coroutine_fn block_job_pause_point(BlockJob *job)
27
nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64
27
--
28
--
28
2.9.3
29
2.31.1
29
30
30
31
diff view generated by jsdifflib
Deleted patch
1
From: Paolo Bonzini <pbonzini@redhat.com>
2
1
3
This is unused since commit 66a0fae ("blockjob: Don't touch BDS iostatus",
4
2016-05-19).
5
6
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Reviewed-by: John Snow <jsnow@redhat.com>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
10
Reviewed-by: Jeff Cody <jcody@redhat.com>
11
Message-id: 20170508141310.8674-3-pbonzini@redhat.com
12
Signed-off-by: Jeff Cody <jcody@redhat.com>
13
---
14
blockjob.c | 3 ---
15
include/block/blockjob_int.h | 3 ---
16
2 files changed, 6 deletions(-)
17
18
diff --git a/blockjob.c b/blockjob.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/blockjob.c
21
+++ b/blockjob.c
22
@@ -XXX,XX +XXX,XX @@ bool block_job_is_cancelled(BlockJob *job)
23
void block_job_iostatus_reset(BlockJob *job)
24
{
25
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
26
- if (job->driver->iostatus_reset) {
27
- job->driver->iostatus_reset(job);
28
- }
29
}
30
31
static int block_job_finish_sync(BlockJob *job,
32
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/include/block/blockjob_int.h
35
+++ b/include/block/blockjob_int.h
36
@@ -XXX,XX +XXX,XX @@ struct BlockJobDriver {
37
/** Optional callback for job types that support setting a speed limit */
38
void (*set_speed)(BlockJob *job, int64_t speed, Error **errp);
39
40
- /** Optional callback for job types that need to forward I/O status reset */
41
- void (*iostatus_reset)(BlockJob *job);
42
-
43
/** Mandatory: Entrypoint for the Coroutine. */
44
CoroutineEntry *start;
45
46
--
47
2.9.3
48
49
diff view generated by jsdifflib
1
On current released versions of glusterfs, glfs_lseek() will sometimes
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
return invalid values for SEEK_DATA or SEEK_HOLE. For SEEK_DATA and
3
SEEK_HOLE, the returned value should be >= the passed offset, or < 0 in
4
the case of error:
5
2
6
LSEEK(2):
3
Instead of displaying the error on stderr, use error_report()
4
which also report to the monitor.
7
5
8
off_t lseek(int fd, off_t offset, int whence);
6
Reviewed-by: Fam Zheng <fam@euphon.net>
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
9
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Message-id: 20210902070025.197072-3-philmd@redhat.com
11
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
---
13
util/vfio-helpers.c | 4 ++--
14
1 file changed, 2 insertions(+), 2 deletions(-)
9
15
10
[...]
16
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
11
12
SEEK_HOLE
13
Adjust the file offset to the next hole in the file greater
14
than or equal to offset. If offset points into the middle of
15
a hole, then the file offset is set to offset. If there is no
16
hole past offset, then the file offset is adjusted to the end
17
of the file (i.e., there is an implicit hole at the end of
18
any file).
19
20
[...]
21
22
RETURN VALUE
23
Upon successful completion, lseek() returns the resulting
24
offset location as measured in bytes from the beginning of the
25
file. On error, the value (off_t) -1 is returned and errno is
26
set to indicate the error
27
28
However, occasionally glfs_lseek() for SEEK_HOLE/DATA will return a
29
value less than the passed offset, yet greater than zero.
30
31
For instance, here are example values observed from this call:
32
33
offs = glfs_lseek(s->fd, start, SEEK_HOLE);
34
if (offs < 0) {
35
return -errno; /* D1 and (H3 or H4) */
36
}
37
38
start == 7608336384
39
offs == 7607877632
40
41
This causes QEMU to abort on the assert test. When this value is
42
returned, errno is also 0.
43
44
This is a reported and known bug to glusterfs:
45
https://bugzilla.redhat.com/show_bug.cgi?id=1425293
46
47
Although this is being fixed in gluster, we still should work around it
48
in QEMU, given that multiple released versions of gluster behave this
49
way.
50
51
This patch treats the return case of (offs < start) the same as if an
52
error value other than ENXIO is returned; we will assume we learned
53
nothing, and there are no holes in the file.
54
55
Signed-off-by: Jeff Cody <jcody@redhat.com>
56
Reviewed-by: Eric Blake <eblake@redhat.com>
57
Reviewed-by: Niels de Vos <ndevos@redhat.com>
58
Message-id: 87c0140e9407c08f6e74b04131b610f2e27c014c.1495560397.git.jcody@redhat.com
59
Signed-off-by: Jeff Cody <jcody@redhat.com>
60
---
61
block/gluster.c | 18 ++++++++++++++++--
62
1 file changed, 16 insertions(+), 2 deletions(-)
63
64
diff --git a/block/gluster.c b/block/gluster.c
65
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
66
--- a/block/gluster.c
18
--- a/util/vfio-helpers.c
67
+++ b/block/gluster.c
19
+++ b/util/vfio-helpers.c
68
@@ -XXX,XX +XXX,XX @@ static int find_allocation(BlockDriverState *bs, off_t start,
20
@@ -XXX,XX +XXX,XX @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
69
if (offs < 0) {
21
if (QEMU_VFIO_DEBUG) {
70
return -errno; /* D3 or D4 */
22
for (i = 0; i < s->nr_mappings - 1; ++i) {
71
}
23
if (!(s->mappings[i].host < s->mappings[i + 1].host)) {
72
- assert(offs >= start);
24
- fprintf(stderr, "item %d not sorted!\n", i);
73
+
25
+ error_report("item %d not sorted!", i);
74
+ if (offs < start) {
26
qemu_vfio_dump_mappings(s);
75
+ /* This is not a valid return by lseek(). We are safe to just return
27
return false;
76
+ * -EIO in this case, and we'll treat it like D4. Unfortunately some
28
}
77
+ * versions of gluster server will return offs < start, so an assert
29
if (!(s->mappings[i].host + s->mappings[i].size <=
78
+ * here will unnecessarily abort QEMU. */
30
s->mappings[i + 1].host)) {
79
+ return -EIO;
31
- fprintf(stderr, "item %d overlap with next!\n", i);
80
+ }
32
+ error_report("item %d overlap with next!", i);
81
33
qemu_vfio_dump_mappings(s);
82
if (offs > start) {
34
return false;
83
/* D2: in hole, next data at offs */
35
}
84
@@ -XXX,XX +XXX,XX @@ static int find_allocation(BlockDriverState *bs, off_t start,
85
if (offs < 0) {
86
return -errno; /* D1 and (H3 or H4) */
87
}
88
- assert(offs >= start);
89
+
90
+ if (offs < start) {
91
+ /* This is not a valid return by lseek(). We are safe to just return
92
+ * -EIO in this case, and we'll treat it like H4. Unfortunately some
93
+ * versions of gluster server will return offs < start, so an assert
94
+ * here will unnecessarily abort QEMU. */
95
+ return -EIO;
96
+ }
97
98
if (offs > start) {
99
/*
100
--
36
--
101
2.9.3
37
2.31.1
102
38
103
39
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
Yet another pure code movement patch, preparing for the next change.
3
Simplify qemu_vfio_dma_[un]map() handlers by replacing a pair of
4
qemu_mutex_lock/qemu_mutex_unlock calls by the WITH_QEMU_LOCK_GUARD
5
macro.
4
6
5
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
7
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
6
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Message-id: 20170508141310.8674-9-pbonzini@redhat.com
9
Message-id: 20210902070025.197072-4-philmd@redhat.com
8
Signed-off-by: Jeff Cody <jcody@redhat.com>
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
---
11
---
10
blockjob.c | 128 ++++++++++++++++++++++++++++++-------------------------------
12
util/vfio-helpers.c | 9 +++------
11
1 file changed, 64 insertions(+), 64 deletions(-)
13
1 file changed, 3 insertions(+), 6 deletions(-)
12
14
13
diff --git a/blockjob.c b/blockjob.c
15
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/blockjob.c
17
--- a/util/vfio-helpers.c
16
+++ b/blockjob.c
18
+++ b/util/vfio-helpers.c
17
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_get(const char *id)
19
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
18
return NULL;
20
assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size));
21
assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size));
22
trace_qemu_vfio_dma_map(s, host, size, temporary, iova);
23
- qemu_mutex_lock(&s->lock);
24
+ QEMU_LOCK_GUARD(&s->lock);
25
mapping = qemu_vfio_find_mapping(s, host, &index);
26
if (mapping) {
27
iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host);
28
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
29
*iova = iova0;
30
}
31
out:
32
- qemu_mutex_unlock(&s->lock);
33
return ret;
19
}
34
}
20
35
21
+BlockJobTxn *block_job_txn_new(void)
36
@@ -XXX,XX +XXX,XX @@ void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host)
22
+{
37
}
23
+ BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
38
24
+ QLIST_INIT(&txn->jobs);
39
trace_qemu_vfio_dma_unmap(s, host);
25
+ txn->refcnt = 1;
40
- qemu_mutex_lock(&s->lock);
26
+ return txn;
41
+ QEMU_LOCK_GUARD(&s->lock);
27
+}
42
m = qemu_vfio_find_mapping(s, host, &index);
28
+
43
if (!m) {
29
+static void block_job_txn_ref(BlockJobTxn *txn)
44
- goto out;
30
+{
31
+ txn->refcnt++;
32
+}
33
+
34
+void block_job_txn_unref(BlockJobTxn *txn)
35
+{
36
+ if (txn && --txn->refcnt == 0) {
37
+ g_free(txn);
38
+ }
39
+}
40
+
41
+void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
42
+{
43
+ if (!txn) {
44
+ return;
45
+ return;
45
+ }
46
}
46
+
47
qemu_vfio_undo_mapping(s, m, NULL);
47
+ assert(!job->txn);
48
-out:
48
+ job->txn = txn;
49
- qemu_mutex_unlock(&s->lock);
49
+
50
+ QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
51
+ block_job_txn_ref(txn);
52
+}
53
+
54
static void block_job_pause(BlockJob *job)
55
{
56
job->pause_count++;
57
@@ -XXX,XX +XXX,XX @@ static void block_job_cancel_async(BlockJob *job)
58
job->cancelled = true;
59
}
50
}
60
51
61
+static int block_job_finish_sync(BlockJob *job,
52
static void qemu_vfio_reset(QEMUVFIOState *s)
62
+ void (*finish)(BlockJob *, Error **errp),
63
+ Error **errp)
64
+{
65
+ Error *local_err = NULL;
66
+ int ret;
67
+
68
+ assert(blk_bs(job->blk)->job == job);
69
+
70
+ block_job_ref(job);
71
+
72
+ finish(job, &local_err);
73
+ if (local_err) {
74
+ error_propagate(errp, local_err);
75
+ block_job_unref(job);
76
+ return -EBUSY;
77
+ }
78
+ /* block_job_drain calls block_job_enter, and it should be enough to
79
+ * induce progress until the job completes or moves to the main thread.
80
+ */
81
+ while (!job->deferred_to_main_loop && !job->completed) {
82
+ block_job_drain(job);
83
+ }
84
+ while (!job->completed) {
85
+ aio_poll(qemu_get_aio_context(), true);
86
+ }
87
+ ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
88
+ block_job_unref(job);
89
+ return ret;
90
+}
91
+
92
static void block_job_completed_txn_abort(BlockJob *job)
93
{
94
AioContext *ctx;
95
@@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job)
96
}
97
}
98
99
-static int block_job_finish_sync(BlockJob *job,
100
- void (*finish)(BlockJob *, Error **errp),
101
- Error **errp)
102
-{
103
- Error *local_err = NULL;
104
- int ret;
105
-
106
- assert(blk_bs(job->blk)->job == job);
107
-
108
- block_job_ref(job);
109
-
110
- finish(job, &local_err);
111
- if (local_err) {
112
- error_propagate(errp, local_err);
113
- block_job_unref(job);
114
- return -EBUSY;
115
- }
116
- /* block_job_drain calls block_job_enter, and it should be enough to
117
- * induce progress until the job completes or moves to the main thread.
118
- */
119
- while (!job->deferred_to_main_loop && !job->completed) {
120
- block_job_drain(job);
121
- }
122
- while (!job->completed) {
123
- aio_poll(qemu_get_aio_context(), true);
124
- }
125
- ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
126
- block_job_unref(job);
127
- return ret;
128
-}
129
-
130
/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
131
* used with block_job_finish_sync() without the need for (rather nasty)
132
* function pointer casts there. */
133
@@ -XXX,XX +XXX,XX @@ void block_job_defer_to_main_loop(BlockJob *job,
134
aio_bh_schedule_oneshot(qemu_get_aio_context(),
135
block_job_defer_to_main_loop_bh, data);
136
}
137
-
138
-BlockJobTxn *block_job_txn_new(void)
139
-{
140
- BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
141
- QLIST_INIT(&txn->jobs);
142
- txn->refcnt = 1;
143
- return txn;
144
-}
145
-
146
-static void block_job_txn_ref(BlockJobTxn *txn)
147
-{
148
- txn->refcnt++;
149
-}
150
-
151
-void block_job_txn_unref(BlockJobTxn *txn)
152
-{
153
- if (txn && --txn->refcnt == 0) {
154
- g_free(txn);
155
- }
156
-}
157
-
158
-void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
159
-{
160
- if (!txn) {
161
- return;
162
- }
163
-
164
- assert(!job->txn);
165
- job->txn = txn;
166
-
167
- QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
168
- block_job_txn_ref(txn);
169
-}
170
--
53
--
171
2.9.3
54
2.31.1
172
55
173
56
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
All block jobs are using block_job_defer_to_main_loop as the final
3
qemu_vfio_add_mapping() returns a pointer to an indexed entry
4
step just before the coroutine terminates. At this point,
4
in pre-allocated QEMUVFIOState::mappings[], thus can not be NULL.
5
block_job_enter should do nothing, but currently it restarts
5
Remove the pointless check.
6
the freed coroutine.
7
6
8
Now, the job->co states should probably be changed to an enum
7
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
9
(e.g. BEFORE_START, STARTED, YIELDED, COMPLETED) subsuming
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
block_job_started, job->deferred_to_main_loop and job->busy.
9
Message-id: 20210902070025.197072-5-philmd@redhat.com
11
For now, this patch eliminates the problematic reenter by
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
removing the reset of job->deferred_to_main_loop (which served
11
---
13
no purpose, as far as I could see) and checking the flag in
12
util/vfio-helpers.c | 4 ----
14
block_job_enter.
13
1 file changed, 4 deletions(-)
15
14
16
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
15
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
17
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
18
Message-id: 20170508141310.8674-12-pbonzini@redhat.com
19
Signed-off-by: Jeff Cody <jcody@redhat.com>
20
---
21
blockjob.c | 10 ++++++++--
22
include/block/blockjob_int.h | 3 ++-
23
2 files changed, 10 insertions(+), 3 deletions(-)
24
25
diff --git a/blockjob.c b/blockjob.c
26
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
27
--- a/blockjob.c
17
--- a/util/vfio-helpers.c
28
+++ b/blockjob.c
18
+++ b/util/vfio-helpers.c
29
@@ -XXX,XX +XXX,XX @@ void block_job_resume_all(void)
19
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
30
20
}
31
void block_job_enter(BlockJob *job)
21
32
{
22
mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
33
- if (job->co && !job->busy) {
23
- if (!mapping) {
34
+ if (!block_job_started(job)) {
24
- ret = -ENOMEM;
35
+ return;
25
- goto out;
36
+ }
26
- }
37
+ if (job->deferred_to_main_loop) {
27
assert(qemu_vfio_verify_mappings(s));
38
+ return;
28
ret = qemu_vfio_do_mapping(s, host, size, iova0);
39
+ }
29
if (ret) {
40
+
41
+ if (!job->busy) {
42
bdrv_coroutine_enter(blk_bs(job->blk), job->co);
43
}
44
}
45
@@ -XXX,XX +XXX,XX @@ static void block_job_defer_to_main_loop_bh(void *opaque)
46
aio_context_acquire(aio_context);
47
}
48
49
- data->job->deferred_to_main_loop = false;
50
data->fn(data->job, data->opaque);
51
52
if (aio_context != data->aio_context) {
53
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/include/block/blockjob_int.h
56
+++ b/include/block/blockjob_int.h
57
@@ -XXX,XX +XXX,XX @@ typedef void BlockJobDeferToMainLoopFn(BlockJob *job, void *opaque);
58
* @fn: The function to run in the main loop
59
* @opaque: The opaque value that is passed to @fn
60
*
61
- * Execute a given function in the main loop with the BlockDriverState
62
+ * This function must be called by the main job coroutine just before it
63
+ * returns. @fn is executed in the main loop with the BlockDriverState
64
* AioContext acquired. Block jobs must call bdrv_unref(), bdrv_close(), and
65
* anything that uses bdrv_drain_all() in the main loop.
66
*
67
--
30
--
68
2.9.3
31
2.31.1
69
32
70
33
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
This splits the part that touches job states from the part that invokes
3
nvme_create_queue_pair() does not return a boolean value (indicating
4
callbacks. It will make the code simpler to understand once job states will
4
eventual error) but a pointer, and is inconsistent in how it fills the
5
be protected by a different mutex than the AioContext lock.
5
error handler. To fulfill callers expectations, always set an error
6
message on failure.
6
7
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reported-by: Auger Eric <eric.auger@redhat.com>
8
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
9
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
9
Message-id: 20170508141310.8674-11-pbonzini@redhat.com
10
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Signed-off-by: Jeff Cody <jcody@redhat.com>
11
Message-id: 20210902070025.197072-6-philmd@redhat.com
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
13
---
12
blockjob.c | 35 ++++++++++++++++++++++-------------
14
block/nvme.c | 3 +++
13
1 file changed, 22 insertions(+), 13 deletions(-)
15
1 file changed, 3 insertions(+)
14
16
15
diff --git a/blockjob.c b/blockjob.c
17
diff --git a/block/nvme.c b/block/nvme.c
16
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
17
--- a/blockjob.c
19
--- a/block/nvme.c
18
+++ b/blockjob.c
20
+++ b/block/nvme.c
19
@@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job)
21
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
20
22
21
static void block_job_completed_single(BlockJob *job)
23
q = g_try_new0(NVMeQueuePair, 1);
22
{
24
if (!q) {
23
+ assert(job->completed);
25
+ error_setg(errp, "Cannot allocate queue pair");
24
+
26
return NULL;
25
if (!job->ret) {
26
if (job->driver->commit) {
27
job->driver->commit(job);
28
@@ -XXX,XX +XXX,XX @@ static int block_job_finish_sync(BlockJob *job,
29
30
block_job_ref(job);
31
32
- finish(job, &local_err);
33
+ if (finish) {
34
+ finish(job, &local_err);
35
+ }
36
if (local_err) {
37
error_propagate(errp, local_err);
38
block_job_unref(job);
39
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job)
40
{
41
AioContext *ctx;
42
BlockJobTxn *txn = job->txn;
43
- BlockJob *other_job, *next;
44
+ BlockJob *other_job;
45
46
if (txn->aborting) {
47
/*
48
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job)
49
return;
50
}
27
}
51
txn->aborting = true;
28
trace_nvme_create_queue_pair(idx, q, size, aio_context,
52
+ block_job_txn_ref(txn);
29
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
53
+
30
qemu_real_host_page_size);
54
/* We are the first failed job. Cancel other jobs. */
31
q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
55
QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
32
if (!q->prp_list_pages) {
56
ctx = blk_get_aio_context(other_job->blk);
33
+ error_setg(errp, "Cannot allocate PRP page list");
57
aio_context_acquire(ctx);
34
goto fail;
58
}
35
}
59
+
36
memset(q->prp_list_pages, 0, bytes);
60
+ /* Other jobs are effectively cancelled by us, set the status for
37
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
61
+ * them; this job, however, may or may not be cancelled, depending
38
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
62
+ * on the caller, so leave it. */
39
false, &prp_list_iova);
63
QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
40
if (r) {
64
- if (other_job == job || other_job->completed) {
41
+ error_setg_errno(errp, -r, "Cannot map buffer for DMA");
65
- /* Other jobs are "effectively" cancelled by us, set the status for
42
goto fail;
66
- * them; this job, however, may or may not be cancelled, depending
67
- * on the caller, so leave it. */
68
- if (other_job != job) {
69
- block_job_cancel_async(other_job);
70
- }
71
- continue;
72
+ if (other_job != job) {
73
+ block_job_cancel_async(other_job);
74
}
75
- block_job_cancel_sync(other_job);
76
- assert(other_job->completed);
77
}
43
}
78
- QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
44
q->free_req_head = -1;
79
+ while (!QLIST_EMPTY(&txn->jobs)) {
80
+ other_job = QLIST_FIRST(&txn->jobs);
81
ctx = blk_get_aio_context(other_job->blk);
82
+ if (!other_job->completed) {
83
+ assert(other_job->cancelled);
84
+ block_job_finish_sync(other_job, NULL, NULL);
85
+ }
86
block_job_completed_single(other_job);
87
aio_context_release(ctx);
88
}
89
+
90
+ block_job_txn_unref(txn);
91
}
92
93
static void block_job_completed_txn_success(BlockJob *job)
94
--
45
--
95
2.9.3
46
2.31.1
96
47
97
48
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
We have two different headers for block job operations, blockjob.h
3
Currently qemu_vfio_dma_map() displays errors on stderr.
4
and blockjob_int.h. The former contains APIs called by the monitor,
4
When using management interface, this information is simply
5
the latter contains APIs called by the block job drivers and the
5
lost. Pass qemu_vfio_dma_map() an Error** handle so it can
6
block layer itself.
6
propagate the error to callers.
7
7
8
Keep the two APIs separate in the blockjob.c file too. This will
8
Reviewed-by: Fam Zheng <fam@euphon.net>
9
be useful when transitioning away from the AioContext lock, because
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
there will be locking policies for the two categories, too---the
10
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
11
monitor will have to call new block_job_lock/unlock APIs, while blockjob
11
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
12
APIs will take care of this for the users.
12
Message-id: 20210902070025.197072-7-philmd@redhat.com
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
14
---
15
include/qemu/vfio-helpers.h | 2 +-
16
block/nvme.c | 22 +++++++++++-----------
17
util/vfio-helpers.c | 10 ++++++----
18
3 files changed, 18 insertions(+), 16 deletions(-)
13
19
14
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
20
diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
15
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
16
Message-id: 20170508141310.8674-6-pbonzini@redhat.com
17
Signed-off-by: Jeff Cody <jcody@redhat.com>
18
---
19
blockjob.c | 390 ++++++++++++++++++++++++++++++++-----------------------------
20
1 file changed, 205 insertions(+), 185 deletions(-)
21
22
diff --git a/blockjob.c b/blockjob.c
23
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
24
--- a/blockjob.c
22
--- a/include/qemu/vfio-helpers.h
25
+++ b/blockjob.c
23
+++ b/include/qemu/vfio-helpers.h
26
@@ -XXX,XX +XXX,XX @@ struct BlockJobTxn {
24
@@ -XXX,XX +XXX,XX @@ typedef struct QEMUVFIOState QEMUVFIOState;
27
25
QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
28
static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
26
void qemu_vfio_close(QEMUVFIOState *s);
29
27
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
30
+/*
28
- bool temporary, uint64_t *iova_list);
31
+ * The block job API is composed of two categories of functions.
29
+ bool temporary, uint64_t *iova_list, Error **errp);
32
+ *
30
int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
33
+ * The first includes functions used by the monitor. The monitor is
31
void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
34
+ * peculiar in that it accesses the block job list with block_job_get, and
32
void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
35
+ * therefore needs consistency across block_job_get and the actual operation
33
diff --git a/block/nvme.c b/block/nvme.c
36
+ * (e.g. block_job_set_speed). The consistency is achieved with
34
index XXXXXXX..XXXXXXX 100644
37
+ * aio_context_acquire/release. These functions are declared in blockjob.h.
35
--- a/block/nvme.c
38
+ *
36
+++ b/block/nvme.c
39
+ * The second includes functions used by the block job drivers and sometimes
37
@@ -XXX,XX +XXX,XX @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
40
+ * by the core block layer. These do not care about locking, because the
38
return false;
41
+ * whole coroutine runs under the AioContext lock, and are declared in
39
}
42
+ * blockjob_int.h.
40
memset(q->queue, 0, bytes);
43
+ */
41
- r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
44
+
42
+ r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
45
BlockJob *block_job_next(BlockJob *job)
43
if (r) {
44
- error_setg(errp, "Cannot map queue");
45
- return false;
46
+ error_prepend(errp, "Cannot map queue: ");
47
}
48
- return true;
49
+ return r == 0;
50
}
51
52
static void nvme_free_queue_pair(NVMeQueuePair *q)
53
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
54
qemu_co_queue_init(&q->free_req_queue);
55
q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
56
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
57
- false, &prp_list_iova);
58
+ false, &prp_list_iova, errp);
59
if (r) {
60
- error_setg_errno(errp, -r, "Cannot map buffer for DMA");
61
+ error_prepend(errp, "Cannot map buffer for DMA: ");
62
goto fail;
63
}
64
q->free_req_head = -1;
65
@@ -XXX,XX +XXX,XX @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
66
error_setg(errp, "Cannot allocate buffer for identify response");
67
goto out;
68
}
69
- r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova);
70
+ r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
71
if (r) {
72
- error_setg(errp, "Cannot map buffer for DMA");
73
+ error_prepend(errp, "Cannot map buffer for DMA: ");
74
goto out;
75
}
76
77
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
78
try_map:
79
r = qemu_vfio_dma_map(s->vfio,
80
qiov->iov[i].iov_base,
81
- len, true, &iova);
82
+ len, true, &iova, NULL);
83
if (r == -ENOSPC) {
84
/*
85
* In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
86
@@ -XXX,XX +XXX,XX @@ static void nvme_aio_unplug(BlockDriverState *bs)
87
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
46
{
88
{
47
if (!job) {
89
int ret;
48
@@ -XXX,XX +XXX,XX @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
90
+ Error *local_err = NULL;
49
return 0;
91
BDRVNVMeState *s = bs->opaque;
50
}
92
51
93
- ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
52
-void *block_job_create(const char *job_id, const BlockJobDriver *driver,
94
+ ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, &local_err);
53
- BlockDriverState *bs, uint64_t perm,
95
if (ret) {
54
- uint64_t shared_perm, int64_t speed, int flags,
96
/* FIXME: we may run out of IOVA addresses after repeated
55
- BlockCompletionFunc *cb, void *opaque, Error **errp)
97
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
56
-{
98
* doesn't reclaim addresses for fixed mappings. */
57
- BlockBackend *blk;
99
- error_report("nvme_register_buf failed: %s", strerror(-ret));
58
- BlockJob *job;
100
+ error_reportf_err(local_err, "nvme_register_buf failed: ");
59
- int ret;
60
-
61
- if (bs->job) {
62
- error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
63
- return NULL;
64
- }
65
-
66
- if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
67
- job_id = bdrv_get_device_name(bs);
68
- if (!*job_id) {
69
- error_setg(errp, "An explicit job ID is required for this node");
70
- return NULL;
71
- }
72
- }
73
-
74
- if (job_id) {
75
- if (flags & BLOCK_JOB_INTERNAL) {
76
- error_setg(errp, "Cannot specify job ID for internal block job");
77
- return NULL;
78
- }
79
-
80
- if (!id_wellformed(job_id)) {
81
- error_setg(errp, "Invalid job ID '%s'", job_id);
82
- return NULL;
83
- }
84
-
85
- if (block_job_get(job_id)) {
86
- error_setg(errp, "Job ID '%s' already in use", job_id);
87
- return NULL;
88
- }
89
- }
90
-
91
- blk = blk_new(perm, shared_perm);
92
- ret = blk_insert_bs(blk, bs, errp);
93
- if (ret < 0) {
94
- blk_unref(blk);
95
- return NULL;
96
- }
97
-
98
- job = g_malloc0(driver->instance_size);
99
- job->driver = driver;
100
- job->id = g_strdup(job_id);
101
- job->blk = blk;
102
- job->cb = cb;
103
- job->opaque = opaque;
104
- job->busy = false;
105
- job->paused = true;
106
- job->pause_count = 1;
107
- job->refcnt = 1;
108
-
109
- error_setg(&job->blocker, "block device is in use by block job: %s",
110
- BlockJobType_lookup[driver->job_type]);
111
- block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
112
- bs->job = job;
113
-
114
- blk_set_dev_ops(blk, &block_job_dev_ops, job);
115
- bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
116
-
117
- QLIST_INSERT_HEAD(&block_jobs, job, job_list);
118
-
119
- blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
120
- block_job_detach_aio_context, job);
121
-
122
- /* Only set speed when necessary to avoid NotSupported error */
123
- if (speed != 0) {
124
- Error *local_err = NULL;
125
-
126
- block_job_set_speed(job, speed, &local_err);
127
- if (local_err) {
128
- block_job_unref(job);
129
- error_propagate(errp, local_err);
130
- return NULL;
131
- }
132
- }
133
- return job;
134
-}
135
-
136
bool block_job_is_internal(BlockJob *job)
137
{
138
return (job->id == NULL);
139
@@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job)
140
bdrv_coroutine_enter(blk_bs(job->blk), job->co);
141
}
142
143
-void block_job_early_fail(BlockJob *job)
144
-{
145
- block_job_unref(job);
146
-}
147
-
148
static void block_job_completed_single(BlockJob *job)
149
{
150
if (!job->ret) {
151
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_success(BlockJob *job)
152
}
101
}
153
}
102
}
154
103
155
-void block_job_completed(BlockJob *job, int ret)
104
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
156
-{
105
index XXXXXXX..XXXXXXX 100644
157
- assert(blk_bs(job->blk)->job == job);
106
--- a/util/vfio-helpers.c
158
- assert(!job->completed);
107
+++ b/util/vfio-helpers.c
159
- job->completed = true;
108
@@ -XXX,XX +XXX,XX @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host,
160
- job->ret = ret;
109
size_t size, size_t max_size)
161
- if (!job->txn) {
162
- block_job_completed_single(job);
163
- } else if (ret < 0 || block_job_is_cancelled(job)) {
164
- block_job_completed_txn_abort(job);
165
- } else {
166
- block_job_completed_txn_success(job);
167
- }
168
-}
169
-
170
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
171
{
110
{
172
Error *local_err = NULL;
111
QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
173
@@ -XXX,XX +XXX,XX @@ void block_job_user_pause(BlockJob *job)
112
+ Error *local_err = NULL;
174
block_job_pause(job);
113
int ret;
175
}
114
176
115
trace_qemu_vfio_ram_block_added(s, host, max_size);
177
-static bool block_job_should_pause(BlockJob *job)
116
- ret = qemu_vfio_dma_map(s, host, max_size, false, NULL);
178
-{
117
+ ret = qemu_vfio_dma_map(s, host, max_size, false, NULL, &local_err);
179
- return job->pause_count > 0;
118
if (ret) {
180
-}
119
- error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size,
181
-
120
- strerror(-ret));
182
bool block_job_user_paused(BlockJob *job)
121
+ error_reportf_err(local_err,
183
{
122
+ "qemu_vfio_dma_map(%p, %zu) failed: ",
184
return job->user_paused;
123
+ host, max_size);
185
}
186
187
-void coroutine_fn block_job_pause_point(BlockJob *job)
188
-{
189
- assert(job && block_job_started(job));
190
-
191
- if (!block_job_should_pause(job)) {
192
- return;
193
- }
194
- if (block_job_is_cancelled(job)) {
195
- return;
196
- }
197
-
198
- if (job->driver->pause) {
199
- job->driver->pause(job);
200
- }
201
-
202
- if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
203
- job->paused = true;
204
- job->busy = false;
205
- qemu_coroutine_yield(); /* wait for block_job_resume() */
206
- job->busy = true;
207
- job->paused = false;
208
- }
209
-
210
- if (job->driver->resume) {
211
- job->driver->resume(job);
212
- }
213
-}
214
-
215
void block_job_user_resume(BlockJob *job)
216
{
217
if (job && job->user_paused && job->pause_count > 0) {
218
@@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job)
219
}
124
}
220
}
125
}
221
126
222
-void block_job_enter(BlockJob *job)
127
@@ -XXX,XX +XXX,XX @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
223
-{
128
* mapping status within this area is not allowed).
224
- if (job->co && !job->busy) {
129
*/
225
- bdrv_coroutine_enter(blk_bs(job->blk), job->co);
130
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
226
- }
131
- bool temporary, uint64_t *iova)
227
-}
132
+ bool temporary, uint64_t *iova, Error **errp)
228
-
229
void block_job_cancel(BlockJob *job)
230
{
133
{
231
if (block_job_started(job)) {
134
int ret = 0;
232
@@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job)
135
int index;
233
}
234
}
235
236
-bool block_job_is_cancelled(BlockJob *job)
237
-{
238
- return job->cancelled;
239
-}
240
-
241
void block_job_iostatus_reset(BlockJob *job)
242
{
243
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
244
@@ -XXX,XX +XXX,XX @@ int block_job_complete_sync(BlockJob *job, Error **errp)
245
return block_job_finish_sync(job, &block_job_complete, errp);
246
}
247
248
-void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
249
-{
250
- assert(job->busy);
251
-
252
- /* Check cancellation *before* setting busy = false, too! */
253
- if (block_job_is_cancelled(job)) {
254
- return;
255
- }
256
-
257
- job->busy = false;
258
- if (!block_job_should_pause(job)) {
259
- co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
260
- }
261
- job->busy = true;
262
-
263
- block_job_pause_point(job);
264
-}
265
-
266
-void block_job_yield(BlockJob *job)
267
-{
268
- assert(job->busy);
269
-
270
- /* Check cancellation *before* setting busy = false, too! */
271
- if (block_job_is_cancelled(job)) {
272
- return;
273
- }
274
-
275
- job->busy = false;
276
- if (!block_job_should_pause(job)) {
277
- qemu_coroutine_yield();
278
- }
279
- job->busy = true;
280
-
281
- block_job_pause_point(job);
282
-}
283
-
284
BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
285
{
286
BlockJobInfo *info;
287
@@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(BlockJob *job, const char *msg)
288
&error_abort);
289
}
290
291
+/*
292
+ * API for block job drivers and the block layer. These functions are
293
+ * declared in blockjob_int.h.
294
+ */
295
+
296
+void *block_job_create(const char *job_id, const BlockJobDriver *driver,
297
+ BlockDriverState *bs, uint64_t perm,
298
+ uint64_t shared_perm, int64_t speed, int flags,
299
+ BlockCompletionFunc *cb, void *opaque, Error **errp)
300
+{
301
+ BlockBackend *blk;
302
+ BlockJob *job;
303
+ int ret;
304
+
305
+ if (bs->job) {
306
+ error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
307
+ return NULL;
308
+ }
309
+
310
+ if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
311
+ job_id = bdrv_get_device_name(bs);
312
+ if (!*job_id) {
313
+ error_setg(errp, "An explicit job ID is required for this node");
314
+ return NULL;
315
+ }
316
+ }
317
+
318
+ if (job_id) {
319
+ if (flags & BLOCK_JOB_INTERNAL) {
320
+ error_setg(errp, "Cannot specify job ID for internal block job");
321
+ return NULL;
322
+ }
323
+
324
+ if (!id_wellformed(job_id)) {
325
+ error_setg(errp, "Invalid job ID '%s'", job_id);
326
+ return NULL;
327
+ }
328
+
329
+ if (block_job_get(job_id)) {
330
+ error_setg(errp, "Job ID '%s' already in use", job_id);
331
+ return NULL;
332
+ }
333
+ }
334
+
335
+ blk = blk_new(perm, shared_perm);
336
+ ret = blk_insert_bs(blk, bs, errp);
337
+ if (ret < 0) {
338
+ blk_unref(blk);
339
+ return NULL;
340
+ }
341
+
342
+ job = g_malloc0(driver->instance_size);
343
+ job->driver = driver;
344
+ job->id = g_strdup(job_id);
345
+ job->blk = blk;
346
+ job->cb = cb;
347
+ job->opaque = opaque;
348
+ job->busy = false;
349
+ job->paused = true;
350
+ job->pause_count = 1;
351
+ job->refcnt = 1;
352
+
353
+ error_setg(&job->blocker, "block device is in use by block job: %s",
354
+ BlockJobType_lookup[driver->job_type]);
355
+ block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
356
+ bs->job = job;
357
+
358
+ blk_set_dev_ops(blk, &block_job_dev_ops, job);
359
+ bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
360
+
361
+ QLIST_INSERT_HEAD(&block_jobs, job, job_list);
362
+
363
+ blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
364
+ block_job_detach_aio_context, job);
365
+
366
+ /* Only set speed when necessary to avoid NotSupported error */
367
+ if (speed != 0) {
368
+ Error *local_err = NULL;
369
+
370
+ block_job_set_speed(job, speed, &local_err);
371
+ if (local_err) {
372
+ block_job_unref(job);
373
+ error_propagate(errp, local_err);
374
+ return NULL;
375
+ }
376
+ }
377
+ return job;
378
+}
379
+
380
void block_job_pause_all(void)
381
{
382
BlockJob *job = NULL;
383
@@ -XXX,XX +XXX,XX @@ void block_job_pause_all(void)
384
}
385
}
386
387
+void block_job_early_fail(BlockJob *job)
388
+{
389
+ block_job_unref(job);
390
+}
391
+
392
+void block_job_completed(BlockJob *job, int ret)
393
+{
394
+ assert(blk_bs(job->blk)->job == job);
395
+ assert(!job->completed);
396
+ job->completed = true;
397
+ job->ret = ret;
398
+ if (!job->txn) {
399
+ block_job_completed_single(job);
400
+ } else if (ret < 0 || block_job_is_cancelled(job)) {
401
+ block_job_completed_txn_abort(job);
402
+ } else {
403
+ block_job_completed_txn_success(job);
404
+ }
405
+}
406
+
407
+static bool block_job_should_pause(BlockJob *job)
408
+{
409
+ return job->pause_count > 0;
410
+}
411
+
412
+void coroutine_fn block_job_pause_point(BlockJob *job)
413
+{
414
+ assert(job && block_job_started(job));
415
+
416
+ if (!block_job_should_pause(job)) {
417
+ return;
418
+ }
419
+ if (block_job_is_cancelled(job)) {
420
+ return;
421
+ }
422
+
423
+ if (job->driver->pause) {
424
+ job->driver->pause(job);
425
+ }
426
+
427
+ if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
428
+ job->paused = true;
429
+ job->busy = false;
430
+ qemu_coroutine_yield(); /* wait for block_job_resume() */
431
+ job->busy = true;
432
+ job->paused = false;
433
+ }
434
+
435
+ if (job->driver->resume) {
436
+ job->driver->resume(job);
437
+ }
438
+}
439
+
440
void block_job_resume_all(void)
441
{
442
BlockJob *job = NULL;
443
@@ -XXX,XX +XXX,XX @@ void block_job_resume_all(void)
444
}
445
}
446
447
+void block_job_enter(BlockJob *job)
448
+{
449
+ if (job->co && !job->busy) {
450
+ bdrv_coroutine_enter(blk_bs(job->blk), job->co);
451
+ }
452
+}
453
+
454
+bool block_job_is_cancelled(BlockJob *job)
455
+{
456
+ return job->cancelled;
457
+}
458
+
459
+void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
460
+{
461
+ assert(job->busy);
462
+
463
+ /* Check cancellation *before* setting busy = false, too! */
464
+ if (block_job_is_cancelled(job)) {
465
+ return;
466
+ }
467
+
468
+ job->busy = false;
469
+ if (!block_job_should_pause(job)) {
470
+ co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
471
+ }
472
+ job->busy = true;
473
+
474
+ block_job_pause_point(job);
475
+}
476
+
477
+void block_job_yield(BlockJob *job)
478
+{
479
+ assert(job->busy);
480
+
481
+ /* Check cancellation *before* setting busy = false, too! */
482
+ if (block_job_is_cancelled(job)) {
483
+ return;
484
+ }
485
+
486
+ job->busy = false;
487
+ if (!block_job_should_pause(job)) {
488
+ qemu_coroutine_yield();
489
+ }
490
+ job->busy = true;
491
+
492
+ block_job_pause_point(job);
493
+}
494
+
495
void block_job_event_ready(BlockJob *job)
496
{
497
job->ready = true;
498
--
136
--
499
2.9.3
137
2.31.1
500
138
501
139
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
The new functions helps respecting the invariant that the coroutine
3
Extract qemu_vfio_water_mark_reached() for readability,
4
is entered with false user_resume, zero pause count and no error
4
and have it provide an error hint it its Error* handle.
5
recorded in the iostatus.
6
5
7
Resetting the iostatus is now common to all of block_job_cancel_async,
6
Suggested-by: Klaus Jensen <k.jensen@samsung.com>
8
block_job_user_resume and block_job_iostatus_reset, albeit with slight
7
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
9
differences:
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
9
Message-id: 20210902070025.197072-8-philmd@redhat.com
10
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
11
---
12
util/vfio-helpers.c | 17 ++++++++++++++++-
13
1 file changed, 16 insertions(+), 1 deletion(-)
10
14
11
- block_job_cancel_async resets the iostatus, and resumes the job if
15
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
12
there was an error, but the coroutine is not restarted immediately.
13
For example the caller may continue with a call to block_job_finish_sync.
14
15
- block_job_user_resume resets the iostatus. It wants to resume the job
16
unconditionally, even if there was no error.
17
18
- block_job_iostatus_reset doesn't resume the job at all. Maybe that's
19
a bug but it should be fixed separately.
20
21
block_job_iostatus_reset does the least common denominator, so add some
22
checking but otherwise leave it as the entry point for resetting the
23
iostatus.
24
25
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
26
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
27
Message-id: 20170508141310.8674-8-pbonzini@redhat.com
28
Signed-off-by: Jeff Cody <jcody@redhat.com>
29
---
30
blockjob.c | 24 ++++++++++++++++++++----
31
1 file changed, 20 insertions(+), 4 deletions(-)
32
33
diff --git a/blockjob.c b/blockjob.c
34
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
35
--- a/blockjob.c
17
--- a/util/vfio-helpers.c
36
+++ b/blockjob.c
18
+++ b/util/vfio-helpers.c
37
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_single(BlockJob *job)
19
@@ -XXX,XX +XXX,XX @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
38
block_job_unref(job);
20
return -ENOMEM;
39
}
21
}
40
22
41
+static void block_job_cancel_async(BlockJob *job)
23
+/**
24
+ * qemu_vfio_water_mark_reached:
25
+ *
26
+ * Returns %true if high watermark has been reached, %false otherwise.
27
+ */
28
+static bool qemu_vfio_water_mark_reached(QEMUVFIOState *s, size_t size,
29
+ Error **errp)
42
+{
30
+{
43
+ if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
31
+ if (s->high_water_mark - s->low_water_mark + 1 < size) {
44
+ block_job_iostatus_reset(job);
32
+ error_setg(errp, "iova exhausted (water mark reached)");
33
+ return true;
45
+ }
34
+ }
46
+ if (job->user_paused) {
35
+ return false;
47
+ /* Do not call block_job_enter here, the caller will handle it. */
48
+ job->user_paused = false;
49
+ job->pause_count--;
50
+ }
51
+ job->cancelled = true;
52
+}
36
+}
53
+
37
+
54
static void block_job_completed_txn_abort(BlockJob *job)
38
/* Map [host, host + size) area into a contiguous IOVA address space, and store
55
{
39
* the result in @iova if not NULL. The caller need to make sure the area is
56
AioContext *ctx;
40
* aligned to page size, and mustn't overlap with existing mapping areas (split
57
@@ -XXX,XX +XXX,XX @@ static void block_job_completed_txn_abort(BlockJob *job)
41
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
58
* them; this job, however, may or may not be cancelled, depending
42
if (mapping) {
59
* on the caller, so leave it. */
43
iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host);
60
if (other_job != job) {
44
} else {
61
- other_job->cancelled = true;
45
- if (s->high_water_mark - s->low_water_mark + 1 < size) {
62
+ block_job_cancel_async(other_job);
46
+ if (qemu_vfio_water_mark_reached(s, size, errp)) {
63
}
47
ret = -ENOMEM;
64
continue;
48
goto out;
65
}
49
}
66
@@ -XXX,XX +XXX,XX @@ bool block_job_user_paused(BlockJob *job)
67
void block_job_user_resume(BlockJob *job)
68
{
69
if (job && job->user_paused && job->pause_count > 0) {
70
- job->user_paused = false;
71
block_job_iostatus_reset(job);
72
+ job->user_paused = false;
73
block_job_resume(job);
74
}
75
}
76
@@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job)
77
void block_job_cancel(BlockJob *job)
78
{
79
if (block_job_started(job)) {
80
- job->cancelled = true;
81
- block_job_iostatus_reset(job);
82
+ block_job_cancel_async(job);
83
block_job_enter(job);
84
} else {
85
block_job_completed(job, -ECANCELED);
86
@@ -XXX,XX +XXX,XX @@ void block_job_yield(BlockJob *job)
87
88
void block_job_iostatus_reset(BlockJob *job)
89
{
90
+ if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
91
+ return;
92
+ }
93
+ assert(job->user_paused && job->pause_count > 0);
94
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
95
}
96
97
--
50
--
98
2.9.3
51
2.31.1
99
52
100
53
diff view generated by jsdifflib
1
From: Paolo Bonzini <pbonzini@redhat.com>
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
2
2
3
Remove use of block_job_pause/resume from outside blockjob.c, thus
3
Both qemu_vfio_find_fixed_iova() and qemu_vfio_find_temp_iova()
4
making them static. The new functions are used by the block layer,
4
return an errno which is unused (or overwritten). Have them propagate
5
so place them in blockjob_int.h.
5
eventual errors to callers, returning a boolean (which is what the
6
Error API recommends, see commit e3fe3988d78 "error: Document Error
7
API usage rules" for rationale).
6
8
7
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Suggested-by: Klaus Jensen <k.jensen@samsung.com>
8
Reviewed-by: John Snow <jsnow@redhat.com>
10
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
9
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
11
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
10
Reviewed-by: Jeff Cody <jcody@redhat.com>
12
Message-id: 20210902070025.197072-9-philmd@redhat.com
11
Message-id: 20170508141310.8674-5-pbonzini@redhat.com
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
12
Signed-off-by: Jeff Cody <jcody@redhat.com>
13
---
14
---
14
block/io.c | 19 ++------
15
util/vfio-helpers.c | 24 ++++++++++++++----------
15
blockjob.c | 114 ++++++++++++++++++++++++++-----------------
16
1 file changed, 14 insertions(+), 10 deletions(-)
16
include/block/blockjob.h | 16 ------
17
include/block/blockjob_int.h | 14 ++++++
18
4 files changed, 86 insertions(+), 77 deletions(-)
19
17
20
diff --git a/block/io.c b/block/io.c
18
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
21
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
22
--- a/block/io.c
20
--- a/util/vfio-helpers.c
23
+++ b/block/io.c
21
+++ b/util/vfio-helpers.c
24
@@ -XXX,XX +XXX,XX @@
22
@@ -XXX,XX +XXX,XX @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
25
#include "trace.h"
23
return true;
26
#include "sysemu/block-backend.h"
24
}
27
#include "block/blockjob.h"
25
28
+#include "block/blockjob_int.h"
26
-static int
29
#include "block/block_int.h"
27
-qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
30
#include "qemu/cutils.h"
28
+static bool qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size,
31
#include "qapi/error.h"
29
+ uint64_t *iova, Error **errp)
32
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_begin(void)
33
bool waited = true;
34
BlockDriverState *bs;
35
BdrvNextIterator it;
36
- BlockJob *job = NULL;
37
GSList *aio_ctxs = NULL, *ctx;
38
39
- while ((job = block_job_next(job))) {
40
- AioContext *aio_context = blk_get_aio_context(job->blk);
41
-
42
- aio_context_acquire(aio_context);
43
- block_job_pause(job);
44
- aio_context_release(aio_context);
45
- }
46
+ block_job_pause_all();
47
48
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
49
AioContext *aio_context = bdrv_get_aio_context(bs);
50
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
51
{
30
{
52
BlockDriverState *bs;
31
int i;
53
BdrvNextIterator it;
32
54
- BlockJob *job = NULL;
33
@@ -XXX,XX +XXX,XX @@ qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
55
34
s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) {
56
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
35
*iova = s->low_water_mark;
57
AioContext *aio_context = bdrv_get_aio_context(bs);
36
s->low_water_mark += size;
58
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all_end(void)
37
- return 0;
59
aio_context_release(aio_context);
38
+ return true;
39
}
60
}
40
}
61
41
- return -ENOMEM;
62
- while ((job = block_job_next(job))) {
42
+ error_setg(errp, "fixed iova range not found");
63
- AioContext *aio_context = blk_get_aio_context(job->blk);
43
+
64
-
44
+ return false;
65
- aio_context_acquire(aio_context);
66
- block_job_resume(job);
67
- aio_context_release(aio_context);
68
- }
69
+ block_job_resume_all();
70
}
45
}
71
46
72
void bdrv_drain_all(void)
47
-static int
73
diff --git a/blockjob.c b/blockjob.c
48
-qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
74
index XXXXXXX..XXXXXXX 100644
49
+static bool qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size,
75
--- a/blockjob.c
50
+ uint64_t *iova, Error **errp)
76
+++ b/blockjob.c
77
@@ -XXX,XX +XXX,XX @@ struct BlockJobTxn {
78
79
static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
80
81
-static char *child_job_get_parent_desc(BdrvChild *c)
82
-{
83
- BlockJob *job = c->opaque;
84
- return g_strdup_printf("%s job '%s'",
85
- BlockJobType_lookup[job->driver->job_type],
86
- job->id);
87
-}
88
-
89
-static const BdrvChildRole child_job = {
90
- .get_parent_desc = child_job_get_parent_desc,
91
- .stay_at_node = true,
92
-};
93
-
94
-static void block_job_drained_begin(void *opaque)
95
-{
96
- BlockJob *job = opaque;
97
- block_job_pause(job);
98
-}
99
-
100
-static void block_job_drained_end(void *opaque)
101
-{
102
- BlockJob *job = opaque;
103
- block_job_resume(job);
104
-}
105
-
106
-static const BlockDevOps block_job_dev_ops = {
107
- .drained_begin = block_job_drained_begin,
108
- .drained_end = block_job_drained_end,
109
-};
110
-
111
BlockJob *block_job_next(BlockJob *job)
112
{
51
{
113
if (!job) {
52
int i;
114
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_get(const char *id)
53
115
return NULL;
54
@@ -XXX,XX +XXX,XX @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
55
s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) {
56
*iova = s->high_water_mark - size;
57
s->high_water_mark = *iova;
58
- return 0;
59
+ return true;
60
}
61
}
62
- return -ENOMEM;
63
+ error_setg(errp, "temporary iova range not found");
64
+
65
+ return false;
116
}
66
}
117
67
118
+static void block_job_pause(BlockJob *job)
119
+{
120
+ job->pause_count++;
121
+}
122
+
123
+static void block_job_resume(BlockJob *job)
124
+{
125
+ assert(job->pause_count > 0);
126
+ job->pause_count--;
127
+ if (job->pause_count) {
128
+ return;
129
+ }
130
+ block_job_enter(job);
131
+}
132
+
133
static void block_job_ref(BlockJob *job)
134
{
135
++job->refcnt;
136
@@ -XXX,XX +XXX,XX @@ static void block_job_detach_aio_context(void *opaque)
137
block_job_unref(job);
138
}
139
140
+static char *child_job_get_parent_desc(BdrvChild *c)
141
+{
142
+ BlockJob *job = c->opaque;
143
+ return g_strdup_printf("%s job '%s'",
144
+ BlockJobType_lookup[job->driver->job_type],
145
+ job->id);
146
+}
147
+
148
+static const BdrvChildRole child_job = {
149
+ .get_parent_desc = child_job_get_parent_desc,
150
+ .stay_at_node = true,
151
+};
152
+
153
+static void block_job_drained_begin(void *opaque)
154
+{
155
+ BlockJob *job = opaque;
156
+ block_job_pause(job);
157
+}
158
+
159
+static void block_job_drained_end(void *opaque)
160
+{
161
+ BlockJob *job = opaque;
162
+ block_job_resume(job);
163
+}
164
+
165
+static const BlockDevOps block_job_dev_ops = {
166
+ .drained_begin = block_job_drained_begin,
167
+ .drained_end = block_job_drained_end,
168
+};
169
+
170
void block_job_remove_all_bdrv(BlockJob *job)
171
{
172
GSList *l;
173
@@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp)
174
job->driver->complete(job, errp);
175
}
176
177
-void block_job_pause(BlockJob *job)
178
-{
179
- job->pause_count++;
180
-}
181
-
182
void block_job_user_pause(BlockJob *job)
183
{
184
job->user_paused = true;
185
@@ -XXX,XX +XXX,XX @@ void coroutine_fn block_job_pause_point(BlockJob *job)
186
}
187
}
188
189
-void block_job_resume(BlockJob *job)
190
-{
191
- assert(job->pause_count > 0);
192
- job->pause_count--;
193
- if (job->pause_count) {
194
- return;
195
- }
196
- block_job_enter(job);
197
-}
198
-
199
void block_job_user_resume(BlockJob *job)
200
{
201
if (job && job->user_paused && job->pause_count > 0) {
202
@@ -XXX,XX +XXX,XX @@ static void block_job_event_completed(BlockJob *job, const char *msg)
203
&error_abort);
204
}
205
206
+void block_job_pause_all(void)
207
+{
208
+ BlockJob *job = NULL;
209
+ while ((job = block_job_next(job))) {
210
+ AioContext *aio_context = blk_get_aio_context(job->blk);
211
+
212
+ aio_context_acquire(aio_context);
213
+ block_job_pause(job);
214
+ aio_context_release(aio_context);
215
+ }
216
+}
217
+
218
+void block_job_resume_all(void)
219
+{
220
+ BlockJob *job = NULL;
221
+ while ((job = block_job_next(job))) {
222
+ AioContext *aio_context = blk_get_aio_context(job->blk);
223
+
224
+ aio_context_acquire(aio_context);
225
+ block_job_resume(job);
226
+ aio_context_release(aio_context);
227
+ }
228
+}
229
+
230
void block_job_event_ready(BlockJob *job)
231
{
232
job->ready = true;
233
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
234
index XXXXXXX..XXXXXXX 100644
235
--- a/include/block/blockjob.h
236
+++ b/include/block/blockjob.h
237
@@ -XXX,XX +XXX,XX @@ void block_job_complete(BlockJob *job, Error **errp);
238
BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
239
240
/**
68
/**
241
- * block_job_pause:
69
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
242
- * @job: The job to be paused.
70
goto out;
243
- *
71
}
244
- * Asynchronously pause the specified job.
72
if (!temporary) {
245
- */
73
- if (qemu_vfio_find_fixed_iova(s, size, &iova0)) {
246
-void block_job_pause(BlockJob *job);
74
+ if (!qemu_vfio_find_fixed_iova(s, size, &iova0, errp)) {
247
-
75
ret = -ENOMEM;
248
-/**
76
goto out;
249
* block_job_user_pause:
77
}
250
* @job: The job to be paused.
78
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
251
*
79
}
252
@@ -XXX,XX +XXX,XX @@ void block_job_user_pause(BlockJob *job);
80
qemu_vfio_dump_mappings(s);
253
bool block_job_user_paused(BlockJob *job);
81
} else {
254
82
- if (qemu_vfio_find_temp_iova(s, size, &iova0)) {
255
/**
83
+ if (!qemu_vfio_find_temp_iova(s, size, &iova0, errp)) {
256
- * block_job_resume:
84
ret = -ENOMEM;
257
- * @job: The job to be resumed.
85
goto out;
258
- *
86
}
259
- * Resume the specified job. Must be paired with a preceding block_job_pause.
260
- */
261
-void block_job_resume(BlockJob *job);
262
-
263
-/**
264
* block_job_user_resume:
265
* @job: The job to be resumed.
266
*
267
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
268
index XXXXXXX..XXXXXXX 100644
269
--- a/include/block/blockjob_int.h
270
+++ b/include/block/blockjob_int.h
271
@@ -XXX,XX +XXX,XX @@ void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
272
void block_job_yield(BlockJob *job);
273
274
/**
275
+ * block_job_pause_all:
276
+ *
277
+ * Asynchronously pause all jobs.
278
+ */
279
+void block_job_pause_all(void);
280
+
281
+/**
282
+ * block_job_resume_all:
283
+ *
284
+ * Resume all block jobs. Must be paired with a preceding block_job_pause_all.
285
+ */
286
+void block_job_resume_all(void);
287
+
288
+/**
289
* block_job_early_fail:
290
* @bs: The block device.
291
*
292
--
87
--
293
2.9.3
88
2.31.1
294
89
295
90
diff view generated by jsdifflib
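The pause/resume pair above nests: every block_job_pause() bumps a
counter, and only the block_job_resume() call that brings the counter
back to zero re-enters the job. A minimal standalone sketch of that
idiom (a simplified Job type and printf stand in for BlockJob and
block_job_enter(); this is an illustration, not the QEMU API):

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-in for a job with a nested pause count. */
    typedef struct {
        int pause_count;
    } Job;

    static void job_pause(Job *job)
    {
        job->pause_count++;
    }

    static void job_resume(Job *job)
    {
        assert(job->pause_count > 0);
        job->pause_count--;
        if (job->pause_count) {
            return; /* someone else still wants us paused */
        }
        printf("job runs again\n"); /* stands in for block_job_enter() */
    }

    int main(void)
    {
        Job job = { 0 };
        job_pause(&job);  /* e.g. bdrv_drain_all_begin() */
        job_pause(&job);  /* e.g. a user pause */
        job_resume(&job); /* prints nothing: one pause still outstanding */
        job_resume(&job); /* prints "job runs again" */
        return 0;
    }

This is why block_job_resume_all() must be paired with a preceding
block_job_pause_all(): the counter keeps the two walks balanced.
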
From: Paolo Bonzini <pbonzini@redhat.com>

Unlike test-blockjob-txn, QMP releases the reference to the transaction
before the jobs finish. Thus, while working on the next patch,
qemu-iotests 124 showed a failure that the unit tests did not catch.
Make the test a little nastier.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20170508141310.8674-10-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 tests/test-blockjob-txn.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-blockjob-txn.c
+++ b/tests/test-blockjob-txn.c
@@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2)
     block_job_start(job1);
     block_job_start(job2);
 
+    /* Release our reference now to trigger as many nice
+     * use-after-free bugs as possible.
+     */
+    block_job_txn_unref(txn);
+
     if (expected1 == -ECANCELED) {
         block_job_cancel(job1);
@@ -XXX,XX +XXX,XX @@ static void test_pair_jobs(int expected1, int expected2)
 
     g_assert_cmpint(result1, ==, expected1);
     g_assert_cmpint(result2, ==, expected2);
-
-    block_job_txn_unref(txn);
 }
 
 static void test_pair_jobs_success(void)
--
2.9.3

From: Philippe Mathieu-Daudé <philmd@redhat.com>

To simplify qemu_vfio_dma_map():
- reduce 'ret' (returned value) scope by returning errno directly,
- remove the goto 'out' label.

Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210902070025.197072-10-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/vfio-helpers.c | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -XXX,XX +XXX,XX @@ static bool qemu_vfio_water_mark_reached(QEMUVFIOState *s, size_t size,
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
                       bool temporary, uint64_t *iova, Error **errp)
 {
-    int ret = 0;
     int index;
     IOVAMapping *mapping;
     uint64_t iova0;
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
     if (mapping) {
         iova0 = mapping->iova + ((uint8_t *)host - (uint8_t *)mapping->host);
     } else {
+        int ret;
+
         if (qemu_vfio_water_mark_reached(s, size, errp)) {
-            ret = -ENOMEM;
-            goto out;
+            return -ENOMEM;
         }
         if (!temporary) {
             if (!qemu_vfio_find_fixed_iova(s, size, &iova0, errp)) {
-                ret = -ENOMEM;
-                goto out;
+                return -ENOMEM;
             }
 
             mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
             assert(qemu_vfio_verify_mappings(s));
             ret = qemu_vfio_do_mapping(s, host, size, iova0);
-            if (ret) {
+            if (ret < 0) {
                 qemu_vfio_undo_mapping(s, mapping, NULL);
-                goto out;
+                return ret;
             }
             qemu_vfio_dump_mappings(s);
         } else {
             if (!qemu_vfio_find_temp_iova(s, size, &iova0, errp)) {
-                ret = -ENOMEM;
-                goto out;
+                return -ENOMEM;
             }
             ret = qemu_vfio_do_mapping(s, host, size, iova0);
-            if (ret) {
-                goto out;
+            if (ret < 0) {
+                return ret;
             }
         }
     }
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
     if (iova) {
         *iova = iova0;
     }
-out:
-    return ret;
+    return 0;
 }
 
 /* Reset the high watermark and free all "temporary" mappings. */
--
2.31.1

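The point of releasing the transaction reference early in the test is
that any later access through a stale pointer becomes a real
use-after-free, which tools such as AddressSanitizer or valgrind can
then catch. A minimal sketch of the refcounting pattern involved
(simplified Txn type with hypothetical names; not the real
BlockJobTxn API):

    #include <stdlib.h>

    /* Simplified refcounted transaction. */
    typedef struct {
        int refcnt;
    } Txn;

    static Txn *txn_new(void)
    {
        Txn *txn = calloc(1, sizeof(*txn));
        txn->refcnt = 1;
        return txn;
    }

    static void txn_ref(Txn *txn)
    {
        txn->refcnt++;
    }

    static void txn_unref(Txn *txn)
    {
        if (--txn->refcnt == 0) {
            free(txn);
        }
    }

    int main(void)
    {
        Txn *txn = txn_new();
        txn_ref(txn);   /* a running "job" holds its own reference */
        txn_unref(txn); /* drop ours early, as the test now does */
        /* From here on, touching txn through our pointer after the
         * job's final txn_unref() would be a use-after-free that a
         * sanitizer can flag. */
        txn_unref(txn); /* the job finishes and frees the transaction */
        return 0;
    }
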
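The qemu_vfio_dma_map() change is a common refactoring: once nothing
needs to happen at the 'out' label, returning directly removes the
label and shrinks the scope of the return-value variable. A hedged
before/after sketch (hypothetical helper, not QEMU code):

    #include <errno.h>

    /* Before: one exit point, reached through a goto. */
    static int map_with_goto(int ok)
    {
        int ret = 0;
        if (!ok) {
            ret = -ENOMEM;
            goto out;
        }
    out:
        return ret;
    }

    /* After: return directly; 'ret' is no longer needed here. */
    static int map_direct(int ok)
    {
        if (!ok) {
            return -ENOMEM;
        }
        return 0;
    }

    int main(void)
    {
        return map_with_goto(1) | map_direct(1);
    }

The direct-return form only works because the function has no cleanup
to run on the error paths; where it does, the goto pattern stays.
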
From: Paolo Bonzini <pbonzini@redhat.com>

Outside blockjob.c, the block_job_iostatus_reset function is used once
in the monitor and once in BlockBackend. When we introduce the block
job mutex, block_job_iostatus_reset's client is going to be the block
layer (for which blockjob.c will take the block job mutex) rather than
the monitor (which will take the block job mutex by itself).

The monitor's call to block_job_iostatus_reset comes just before the
sole call to block_job_user_resume, so reset the iostatus directly
from block_job_user_resume. This will avoid the need to introduce
separate block_job_iostatus_reset and block_job_iostatus_reset_locked
APIs.

After making this change, move the function together with the others
that were moved in the previous patch.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20170508141310.8674-7-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 blockdev.c |  1 -
 blockjob.c | 11 ++++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/blockdev.c b/blockdev.c
index XXXXXXX..XXXXXXX 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -XXX,XX +XXX,XX @@ void qmp_block_job_resume(const char *device, Error **errp)
     }
 
     trace_qmp_block_job_resume(job);
-    block_job_iostatus_reset(job);
     block_job_user_resume(job);
     aio_context_release(aio_context);
 }
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ void block_job_user_resume(BlockJob *job)
 {
     if (job && job->user_paused && job->pause_count > 0) {
         job->user_paused = false;
+        block_job_iostatus_reset(job);
         block_job_resume(job);
     }
 }
@@ -XXX,XX +XXX,XX @@ void block_job_cancel(BlockJob *job)
     }
 }
 
-void block_job_iostatus_reset(BlockJob *job)
-{
-    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
-}
-
 static int block_job_finish_sync(BlockJob *job,
                                  void (*finish)(BlockJob *, Error **errp),
                                  Error **errp)
@@ -XXX,XX +XXX,XX @@ void block_job_yield(BlockJob *job)
     block_job_pause_point(job);
 }
 
+void block_job_iostatus_reset(BlockJob *job)
+{
+    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
+}
+
 void block_job_event_ready(BlockJob *job)
 {
     job->ready = true;
--
2.9.3

From: Philippe Mathieu-Daudé <philmd@redhat.com>

Pass qemu_vfio_do_mapping() an Error* argument so it can propagate
any error to callers. Replace error_report(), which only reports
to the monitor, with the more generic error_setg_errno().

Reviewed-by: Fam Zheng <fam@euphon.net>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210902070025.197072-11-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/vfio-helpers.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -XXX,XX +XXX,XX @@ static IOVAMapping *qemu_vfio_add_mapping(QEMUVFIOState *s,
 
 /* Do the DMA mapping with VFIO. */
 static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size,
-                                uint64_t iova)
+                                uint64_t iova, Error **errp)
 {
     struct vfio_iommu_type1_dma_map dma_map = {
         .argsz = sizeof(dma_map),
@@ -XXX,XX +XXX,XX @@ static int qemu_vfio_do_mapping(QEMUVFIOState *s, void *host, size_t size,
     trace_qemu_vfio_do_mapping(s, host, iova, size);
 
     if (ioctl(s->container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
-        error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
+        error_setg_errno(errp, errno, "VFIO_MAP_DMA failed");
         return -errno;
     }
     return 0;
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
 
             mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
             assert(qemu_vfio_verify_mappings(s));
-            ret = qemu_vfio_do_mapping(s, host, size, iova0);
+            ret = qemu_vfio_do_mapping(s, host, size, iova0, errp);
             if (ret < 0) {
                 qemu_vfio_undo_mapping(s, mapping, NULL);
                 return ret;
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
             if (!qemu_vfio_find_temp_iova(s, size, &iova0, errp)) {
                 return -ENOMEM;
             }
-            ret = qemu_vfio_do_mapping(s, host, size, iova0);
+            ret = qemu_vfio_do_mapping(s, host, size, iova0, errp);
             if (ret < 0) {
                 return ret;
             }
--
2.31.1

78
2.9.3
58
2.31.1
79
59
80
60
diff view generated by jsdifflib
From: Paolo Bonzini <pbonzini@redhat.com>

Outside blockjob.c, block_job_unref is only used when a block job fails
to start, and block_job_ref is not used at all. The reference counting
thus is pretty well hidden. Introduce a separate function to be used
by block jobs; because block_job_ref and block_job_unref now become
static, move them earlier in blockjob.c.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Message-id: 20170508141310.8674-4-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
---
 block/backup.c               |  2 +-
 block/commit.c               |  2 +-
 block/mirror.c               |  2 +-
 blockjob.c                   | 47 ++++++++++++++++++++++++------------------
 include/block/blockjob_int.h | 15 +++-----------
 tests/test-blockjob.c        | 10 +++++-----
 6 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index XXXXXXX..XXXXXXX 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -XXX,XX +XXX,XX @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     }
     if (job) {
         backup_clean(&job->common);
-        block_job_unref(&job->common);
+        block_job_early_fail(&job->common);
     }
 
     return NULL;
diff --git a/block/commit.c b/block/commit.c
index XXXXXXX..XXXXXXX 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -XXX,XX +XXX,XX @@ fail:
     if (commit_top_bs) {
         bdrv_set_backing_hd(overlay_bs, top, &error_abort);
     }
-    block_job_unref(&s->common);
+    block_job_early_fail(&s->common);
 }
 
diff --git a/block/mirror.c b/block/mirror.c
index XXXXXXX..XXXXXXX 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -XXX,XX +XXX,XX @@ fail:
 
         g_free(s->replaces);
         blk_unref(s->target);
-        block_job_unref(&s->common);
+        block_job_early_fail(&s->common);
     }
 
     bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
diff --git a/blockjob.c b/blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -XXX,XX +XXX,XX @@ BlockJob *block_job_get(const char *id)
     return NULL;
 }
 
+static void block_job_ref(BlockJob *job)
+{
+    ++job->refcnt;
+}
+
+static void block_job_attached_aio_context(AioContext *new_context,
+                                           void *opaque);
+static void block_job_detach_aio_context(void *opaque);
+
+static void block_job_unref(BlockJob *job)
+{
+    if (--job->refcnt == 0) {
+        BlockDriverState *bs = blk_bs(job->blk);
+        bs->job = NULL;
+        block_job_remove_all_bdrv(job);
+        blk_remove_aio_context_notifier(job->blk,
+                                        block_job_attached_aio_context,
+                                        block_job_detach_aio_context, job);
+        blk_unref(job->blk);
+        error_free(job->blocker);
+        g_free(job->id);
+        QLIST_REMOVE(job, job_list);
+        g_free(job);
+    }
+}
+
 static void block_job_attached_aio_context(AioContext *new_context,
                                            void *opaque)
 {
@@ -XXX,XX +XXX,XX @@ void block_job_start(BlockJob *job)
     bdrv_coroutine_enter(blk_bs(job->blk), job->co);
 }
 
-void block_job_ref(BlockJob *job)
+void block_job_early_fail(BlockJob *job)
 {
-    ++job->refcnt;
-}
-
-void block_job_unref(BlockJob *job)
-{
-    if (--job->refcnt == 0) {
-        BlockDriverState *bs = blk_bs(job->blk);
-        bs->job = NULL;
-        block_job_remove_all_bdrv(job);
-        blk_remove_aio_context_notifier(job->blk,
-                                        block_job_attached_aio_context,
-                                        block_job_detach_aio_context, job);
-        blk_unref(job->blk);
-        error_free(job->blocker);
-        g_free(job->id);
-        QLIST_REMOVE(job, job_list);
-        g_free(job);
-    }
+    block_job_unref(job);
 }
 
 static void block_job_completed_single(BlockJob *job)
diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -XXX,XX +XXX,XX @@ void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns);
 void block_job_yield(BlockJob *job);
 
 /**
- * block_job_ref:
+ * block_job_early_fail:
  * @bs: The block device.
  *
- * Grab a reference to the block job. Should be paired with block_job_unref.
+ * The block job could not be started, free it.
  */
-void block_job_ref(BlockJob *job);
-
-/**
- * block_job_unref:
- * @bs: The block device.
- *
- * Release reference to the block job and release resources if it is the last
- * reference.
- */
-void block_job_unref(BlockJob *job);
+void block_job_early_fail(BlockJob *job);
 
 /**
  * block_job_completed:
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/test-blockjob.c
+++ b/tests/test-blockjob.c
@@ -XXX,XX +XXX,XX @@ static void test_job_ids(void)
     job[1] = do_test_id(blk[1], "id0", false);
 
     /* But once job[0] finishes we can reuse its ID */
-    block_job_unref(job[0]);
+    block_job_early_fail(job[0]);
     job[1] = do_test_id(blk[1], "id0", true);
 
     /* No job ID specified, defaults to the backend name ('drive1') */
-    block_job_unref(job[1]);
+    block_job_early_fail(job[1]);
     job[1] = do_test_id(blk[1], NULL, true);
 
     /* Duplicate job ID */
@@ -XXX,XX +XXX,XX @@ static void test_job_ids(void)
     /* This one is valid */
     job[2] = do_test_id(blk[2], "id_2", true);
 
-    block_job_unref(job[0]);
-    block_job_unref(job[1]);
-    block_job_unref(job[2]);
+    block_job_early_fail(job[0]);
+    block_job_early_fail(job[1]);
+    block_job_early_fail(job[2]);
 
     destroy_blk(blk[0]);
     destroy_blk(blk[1]);
--
2.9.3

From: Philippe Mathieu-Daudé <philmd@redhat.com>

We expect the first qemu_vfio_dma_map() to fail (indicating
DMA mapping exhaustion, see commit 15a730e7a3a). Do not
report the first failure as an error, since we are going to
flush the mappings and retry.

This removes a spurious error message displayed on the monitor:

  (qemu) c
  (qemu) qemu-kvm: VFIO_MAP_DMA failed: No space left on device
  (qemu) info status
  VM status: running

Reported-by: Tingting Mao <timao@redhat.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210902070025.197072-12-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
     uint64_t *pagelist = req->prp_list_page;
     int i, j, r;
     int entries = 0;
+    Error *local_err = NULL, **errp = NULL;
 
     assert(qiov->size);
     assert(QEMU_IS_ALIGNED(qiov->size, s->page_size));
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
 try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
-                              len, true, &iova, NULL);
+                              len, true, &iova, errp);
         if (r == -ENOSPC) {
             /*
              * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
@@ -XXX,XX +XXX,XX @@ try_map:
                     goto fail;
                 }
             }
+            errp = &local_err;
+
             goto try_map;
         }
         if (r) {
@@ -XXX,XX +XXX,XX @@ fail:
      * because they are already mapped before calling this function; for
      * temporary mappings, a later nvme_cmd_(un)map_qiov will reclaim by
      * calling qemu_vfio_dma_reset_temporary when necessary. */
+    if (local_err) {
+        error_reportf_err(local_err, "Cannot map buffer for DMA: ");
+    }
     return r;
 }
 
--
2.31.1

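block_job_early_fail() is a thin, purpose-named wrapper that lets the
actual reference counting become private to blockjob.c. A sketch of
that encapsulation (simplified types, not the real API):

    #include <stdlib.h>

    typedef struct {
        int refcnt;
    } Job;

    /* The refcounting is a hidden implementation detail... */
    static void job_unref(Job *job)
    {
        if (--job->refcnt == 0) {
            free(job);
        }
    }

    /* ...and outside callers get only the operation they need:
     * disposing of a job that never managed to start. */
    void job_early_fail(Job *job)
    {
        job_unref(job);
    }

    int main(void)
    {
        Job *job = calloc(1, sizeof(*job));
        job->refcnt = 1;
        job_early_fail(job); /* frees the job */
        return 0;
    }
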
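The block/nvme.c change implements a "stay quiet on the first,
expected failure" policy: the first mapping attempt gets a NULL error
sink, and only the retry after flushing temporary mappings is wired
to an Error that gets reported. A small standalone model of the
pattern (hypothetical try_map(), not the QEMU function):

    #include <errno.h>
    #include <stdio.h>

    /* Fails on the first attempt, succeeds on the retry. */
    static int try_map(int attempt, const char **errp)
    {
        if (attempt == 0) {
            if (errp) {
                *errp = "VFIO_MAP_DMA failed: No space left on device";
            }
            return -ENOSPC; /* simulated mapping exhaustion */
        }
        return 0;
    }

    int main(void)
    {
        const char *err = NULL;
        if (try_map(0, NULL) == -ENOSPC) { /* quiet: we will retry */
            /* ...flush "temporary" mappings here... */
            if (try_map(1, &err) < 0 && err) {
                fprintf(stderr, "Cannot map buffer for DMA: %s\n", err);
            }
        }
        return 0;
    }

Only a failure of the retry reaches the user, which is exactly the
behaviour the commit message demonstrates on the monitor.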