The following changes since commit 469e72ab7dbbd7ff4ee601e5ea7c29545d46593b:

  Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging (2020-10-02 16:19:42 +0100)

are available in the Git repository at:

  https://gitlab.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 9ab5741164b1727d22f69fe7001382baf0d56977:

  util/vfio-helpers: Rework the IOVA allocator to avoid IOVA reserved regions (2020-10-05 10:59:42 +0100)

----------------------------------------------------------------
Pull request

v2:
 * Removed clang-format call from scripts/block-coroutine-wrapper.py. This
   avoids the issue with clang version incompatibility. It could be added back
   in the future but the code is readable without reformatting and it also
   makes the build less dependent on the environment.

----------------------------------------------------------------

Eric Auger (2):
  util/vfio-helpers: Collect IOVA reserved regions
  util/vfio-helpers: Rework the IOVA allocator to avoid IOVA reserved
    regions

Philippe Mathieu-Daudé (6):
  util/vfio-helpers: Pass page protections to qemu_vfio_pci_map_bar()
  block/nvme: Map doorbells pages write-only
  block/nvme: Reduce I/O registers scope
  block/nvme: Drop NVMeRegs structure, directly use NvmeBar
  block/nvme: Use register definitions from 'block/nvme.h'
  block/nvme: Replace magic value by SCALE_MS definition

Stefano Garzarella (1):
  docs: add 'io_uring' option to 'aio' param in qemu-options.hx

Vladimir Sementsov-Ogievskiy (8):
  block: return error-code from bdrv_invalidate_cache
  block/io: refactor coroutine wrappers
  block: declare some coroutine functions in block/coroutines.h
  scripts: add block-coroutine-wrapper.py
  block: generate coroutine-wrapper code
  block: drop bdrv_prwv
  block/io: refactor save/load vmstate
  include/block/block.h: drop non-ascii quotation mark

 block/block-gen.h                      |  49 ++++
 block/coroutines.h                     |  65 +++++
 include/block/block.h                  |  36 ++-
 include/qemu/vfio-helpers.h            |   2 +-
 block.c                                |  97 +------
 block/io.c                             | 339 ++++---------------------
 block/nvme.c                           |  73 +++---
 tests/test-bdrv-drain.c                |   2 +-
 util/vfio-helpers.c                    | 133 +++++++++-
 block/meson.build                      |   8 +
 docs/devel/block-coroutine-wrapper.rst |  54 ++++
 docs/devel/index.rst                   |   1 +
 qemu-options.hx                        |  10 +-
 scripts/block-coroutine-wrapper.py     | 167 ++++++++++++
 14 files changed, 608 insertions(+), 428 deletions(-)
 create mode 100644 block/block-gen.h
 create mode 100644 block/coroutines.h
 create mode 100644 docs/devel/block-coroutine-wrapper.rst
 create mode 100644 scripts/block-coroutine-wrapper.py

--
2.26.2
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Pages are currently mapped READ/WRITE. To be able to use different
protections, add a new argument to qemu_vfio_pci_map_bar().

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200922083821.578519-2-philmd@redhat.com>
---
 include/qemu/vfio-helpers.h | 2 +-
 block/nvme.c                | 3 ++-
 util/vfio-helpers.c         | 4 ++--
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
 int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
 void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
 void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
-                            uint64_t offset, uint64_t size,
+                            uint64_t offset, uint64_t size, int prot,
                             Error **errp);
 void qemu_vfio_pci_unmap_bar(QEMUVFIOState *s, int index, void *bar,
                              uint64_t offset, uint64_t size);
diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         goto out;
     }

-    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE, errp);
+    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE,
+                                    PROT_READ | PROT_WRITE, errp);
     if (!s->regs) {
         ret = -EINVAL;
         goto out;
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -XXX,XX +XXX,XX @@ static int qemu_vfio_pci_init_bar(QEMUVFIOState *s, int index, Error **errp)
  * Map a PCI bar area.
  */
 void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
-                            uint64_t offset, uint64_t size,
+                            uint64_t offset, uint64_t size, int prot,
                             Error **errp)
 {
     void *p;
     assert_bar_index_valid(s, index);
     p = mmap(NULL, MIN(size, s->bar_region_info[index].size - offset),
-             PROT_READ | PROT_WRITE, MAP_SHARED,
+             prot, MAP_SHARED,
             s->device, s->bar_region_info[index].offset + offset);
     if (p == MAP_FAILED) {
         error_setg_errno(errp, errno, "Failed to map BAR region");
--
2.26.2
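[A minimal usage sketch of the extended signature above, for readers skimming
the series. The caller, BAR index and error handling are hypothetical; the
real callers follow in the next patches of this series.]

    /* Hypothetical caller: map the first 4 KiB of BAR0 write-only.
     * qemu_vfio_pci_map_bar() returns NULL and sets *errp on failure. */
    void *db = qemu_vfio_pci_map_bar(s->vfio, 0 /* BAR index */,
                                     0 /* offset */, 4096 /* size */,
                                     PROT_WRITE, errp);
    if (!db) {
        return -EINVAL;
    }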
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Per the datasheet sections 3.1.13/3.1.14:
  "The host should not read the doorbell registers."

As we don't need read access, map the doorbells with write-only
permission. We keep a reference to this mapped address in the
BDRVNVMeState structure.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200922083821.578519-3-philmd@redhat.com>
---
 block/nvme.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@
 #define NVME_SQ_ENTRY_BYTES 64
 #define NVME_CQ_ENTRY_BYTES 16
 #define NVME_QUEUE_SIZE 128
-#define NVME_BAR_SIZE 8192
+#define NVME_DOORBELL_SIZE 4096

 /*
  * We have to leave one slot empty as that is the full queue case where
@@ -XXX,XX +XXX,XX @@ typedef struct {
 /* Memory mapped registers */
 typedef volatile struct {
     NvmeBar ctrl;
-    struct {
-        uint32_t sq_tail;
-        uint32_t cq_head;
-    } doorbells[];
 } NVMeRegs;

 #define INDEX_ADMIN 0
@@ -XXX,XX +XXX,XX @@ struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
     NVMeRegs *regs;
+    /* Memory mapped registers */
+    volatile struct {
+        uint32_t sq_tail;
+        uint32_t cq_head;
+    } *doorbells;
     /* The submission/completion queue pairs.
      * [0]: admin queue.
      * [1..]: io queues.
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
         error_propagate(errp, local_err);
         goto fail;
     }
-    q->sq.doorbell = &s->regs->doorbells[idx * s->doorbell_scale].sq_tail;
+    q->sq.doorbell = &s->doorbells[idx * s->doorbell_scale].sq_tail;

     nvme_init_queue(s, &q->cq, size, NVME_CQ_ENTRY_BYTES, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         goto fail;
     }
-    q->cq.doorbell = &s->regs->doorbells[idx * s->doorbell_scale].cq_head;
+    q->cq.doorbell = &s->doorbells[idx * s->doorbell_scale].cq_head;

     return q;
 fail:
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         goto out;
     }

-    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, NVME_BAR_SIZE,
+    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
                                     PROT_READ | PROT_WRITE, errp);
     if (!s->regs) {
         ret = -EINVAL;
         goto out;
     }
-
     /* Perform initialize sequence as described in NVMe spec "7.6.1
      * Initialization". */

@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         }
     }

+    s->doorbells = qemu_vfio_pci_map_bar(s->vfio, 0, sizeof(NvmeBar),
+                                         NVME_DOORBELL_SIZE, PROT_WRITE, errp);
+    if (!s->doorbells) {
+        ret = -EINVAL;
+        goto out;
+    }
+
     /* Set up admin queue. */
     s->queues = g_new(NVMeQueuePair *, 1);
     s->queues[INDEX_ADMIN] = nvme_create_queue_pair(s, aio_context, 0,
@@ -XXX,XX +XXX,XX @@ static void nvme_close(BlockDriverState *bs)
                            &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                            false, NULL, NULL);
     event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
-    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
+    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->doorbells,
+                            sizeof(NvmeBar), NVME_DOORBELL_SIZE);
+    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, sizeof(NvmeBar));
     qemu_vfio_close(s->vfio);

     g_free(s->device);
--
2.26.2
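[Illustration only, not part of the patch: how the write-only doorbell
mapping above is consumed. It mirrors the doorbells[] layout and
doorbell_scale usage shown in the diff; 'idx' and 'new_tail_value' are
hypothetical names.]

    /* Doorbells start right after the NvmeBar register block (offset
     * sizeof(NvmeBar) in BAR0) and are strided by doorbell_scale, which is
     * derived from CAP.DSTRD. With the page mapped PROT_WRITE, queue idx's
     * submission-queue tail doorbell is reached like this: */
    volatile uint32_t *sq_tail_db =
        &s->doorbells[idx * s->doorbell_scale].sq_tail;

    *sq_tail_db = cpu_to_le32(new_tail_value);   /* write-only register */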
From: Philippe Mathieu-Daudé <philmd@redhat.com>

We only access the I/O register in nvme_init().
Remove the reference in BDRVNVMeState and reduce its scope.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200922083821.578519-4-philmd@redhat.com>
---
 block/nvme.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ enum {
 struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
-    NVMeRegs *regs;
     /* Memory mapped registers */
     volatile struct {
         uint32_t sq_tail;
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     uint64_t timeout_ms;
     uint64_t deadline, now;
     Error *local_err = NULL;
+    NVMeRegs *regs;

     qemu_co_mutex_init(&s->dma_map_lock);
     qemu_co_queue_init(&s->dma_flush_queue);
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         goto out;
     }

-    s->regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
-                                    PROT_READ | PROT_WRITE, errp);
-    if (!s->regs) {
+    regs = qemu_vfio_pci_map_bar(s->vfio, 0, 0, sizeof(NvmeBar),
+                                 PROT_READ | PROT_WRITE, errp);
+    if (!regs) {
         ret = -EINVAL;
         goto out;
     }
     /* Perform initialize sequence as described in NVMe spec "7.6.1
      * Initialization". */

-    cap = le64_to_cpu(s->regs->ctrl.cap);
+    cap = le64_to_cpu(regs->ctrl.cap);
     if (!(cap & (1ULL << 37))) {
         error_setg(errp, "Device doesn't support NVMe command set");
         ret = -EINVAL;
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

     /* Reset device to get a clean state. */
-    s->regs->ctrl.cc = cpu_to_le32(le32_to_cpu(s->regs->ctrl.cc) & 0xFE);
+    regs->ctrl.cc = cpu_to_le32(le32_to_cpu(regs->ctrl.cc) & 0xFE);
     /* Wait for CSTS.RDY = 0. */
     deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
-    while (le32_to_cpu(s->regs->ctrl.csts) & 0x1) {
+    while (le32_to_cpu(regs->ctrl.csts) & 0x1) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to reset (%"
                              PRId64 " ms)",
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     }
     s->nr_queues = 1;
     QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
-    s->regs->ctrl.aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
-    s->regs->ctrl.asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
-    s->regs->ctrl.acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);
+    regs->ctrl.aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
+    regs->ctrl.asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
+    regs->ctrl.acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

     /* After setting up all control registers we can enable device now. */
-    s->regs->ctrl.cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
+    regs->ctrl.cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                                    (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                                    0x1);
     /* Wait for CSTS.RDY = 1. */
     now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     deadline = now + timeout_ms * 1000000;
-    while (!(le32_to_cpu(s->regs->ctrl.csts) & 0x1)) {
+    while (!(le32_to_cpu(regs->ctrl.csts) & 0x1)) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to start (%"
                              PRId64 " ms)",
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         ret = -EIO;
     }
 out:
+    if (regs) {
+        qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)regs, 0, sizeof(NvmeBar));
+    }
+
     /* Cleaning up is done in nvme_file_open() upon error. */
     return ret;
 }
@@ -XXX,XX +XXX,XX @@ static void nvme_close(BlockDriverState *bs)
     event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
     qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->doorbells,
                             sizeof(NvmeBar), NVME_DOORBELL_SIZE);
-    qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, sizeof(NvmeBar));
     qemu_vfio_close(s->vfio);

     g_free(s->device);
--
2.26.2
From: Philippe Mathieu-Daudé <philmd@redhat.com>

NVMeRegs only contains NvmeBar. Simplify the code by using NvmeBar
directly.

This triggers a checkpatch.pl error:

  ERROR: Use of volatile is usually wrong, please add a comment
  #30: FILE: block/nvme.c:691:
  +    volatile NvmeBar *regs;

This is a false positive as in our case we are using I/O registers,
so the 'volatile' use is justified.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200922083821.578519-5-philmd@redhat.com>
---
 block/nvme.c | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
     QEMUBH *completion_bh;
 } NVMeQueuePair;

-/* Memory mapped registers */
-typedef volatile struct {
-    NvmeBar ctrl;
-} NVMeRegs;
-
 #define INDEX_ADMIN 0
 #define INDEX_IO(n) (1 + n)

@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     uint64_t timeout_ms;
     uint64_t deadline, now;
     Error *local_err = NULL;
-    NVMeRegs *regs;
+    volatile NvmeBar *regs = NULL;

     qemu_co_mutex_init(&s->dma_map_lock);
     qemu_co_queue_init(&s->dma_flush_queue);
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     /* Perform initialize sequence as described in NVMe spec "7.6.1
      * Initialization". */

-    cap = le64_to_cpu(regs->ctrl.cap);
+    cap = le64_to_cpu(regs->cap);
     if (!(cap & (1ULL << 37))) {
         error_setg(errp, "Device doesn't support NVMe command set");
         ret = -EINVAL;
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);

     /* Reset device to get a clean state. */
-    regs->ctrl.cc = cpu_to_le32(le32_to_cpu(regs->ctrl.cc) & 0xFE);
+    regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
     /* Wait for CSTS.RDY = 0. */
     deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
-    while (le32_to_cpu(regs->ctrl.csts) & 0x1) {
+    while (le32_to_cpu(regs->csts) & 0x1) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to reset (%"
                              PRId64 " ms)",
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     }
     s->nr_queues = 1;
     QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
-    regs->ctrl.aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
-    regs->ctrl.asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
-    regs->ctrl.acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);
+    regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
+    regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
+    regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

     /* After setting up all control registers we can enable device now. */
-    regs->ctrl.cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
+    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
                                 (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
                                 0x1);
     /* Wait for CSTS.RDY = 1. */
     now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     deadline = now + timeout_ms * 1000000;
-    while (!(le32_to_cpu(regs->ctrl.csts) & 0x1)) {
+    while (!(le32_to_cpu(regs->csts) & 0x1)) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to start (%"
                              PRId64 " ms)",
--
2.26.2
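[A short aside, not from the patch, on why the 'volatile' flagged by
checkpatch is wanted here. The sketch assumes 'mapped_bar0' is the BAR0
mapping obtained earlier; the register names come from NvmeBar.]

    /* Polling a memory-mapped status register: the 'volatile' qualifier
     * forces a fresh device read on every loop iteration. Without it the
     * compiler could legally hoist the load out of the loop and spin on a
     * stale value. */
    volatile NvmeBar *regs = mapped_bar0;
    while (!(le32_to_cpu(regs->csts) & 0x1)) {   /* CSTS.RDY */
        /* timeout handling elided */
    }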
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Use the NVMe register definitions from "block/nvme.h" which
ease a bit reviewing the code while matching the datasheet.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200922083821.578519-6-philmd@redhat.com>
---
 block/nvme.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
      * Initialization". */

     cap = le64_to_cpu(regs->cap);
-    if (!(cap & (1ULL << 37))) {
+    if (!NVME_CAP_CSS(cap)) {
         error_setg(errp, "Device doesn't support NVMe command set");
         ret = -EINVAL;
         goto out;
     }

-    s->page_size = MAX(4096, 1 << (12 + ((cap >> 48) & 0xF)));
-    s->doorbell_scale = (4 << (((cap >> 32) & 0xF))) / sizeof(uint32_t);
+    s->page_size = MAX(4096, 1 << NVME_CAP_MPSMIN(cap));
+    s->doorbell_scale = (4 << NVME_CAP_DSTRD(cap)) / sizeof(uint32_t);
     bs->bl.opt_mem_alignment = s->page_size;
-    timeout_ms = MIN(500 * ((cap >> 24) & 0xFF), 30000);
+    timeout_ms = MIN(500 * NVME_CAP_TO(cap), 30000);

     /* Reset device to get a clean state. */
     regs->cc = cpu_to_le32(le32_to_cpu(regs->cc) & 0xFE);
     /* Wait for CSTS.RDY = 0. */
     deadline = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + timeout_ms * SCALE_MS;
-    while (le32_to_cpu(regs->csts) & 0x1) {
+    while (NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to reset (%"
                              PRId64 " ms)",
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     }
     s->nr_queues = 1;
     QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
-    regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
+    regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
+                            (NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
     regs->asq = cpu_to_le64(s->queues[INDEX_ADMIN]->sq.iova);
     regs->acq = cpu_to_le64(s->queues[INDEX_ADMIN]->cq.iova);

     /* After setting up all control registers we can enable device now. */
-    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << 20) |
-                           (ctz32(NVME_SQ_ENTRY_BYTES) << 16) |
-                           0x1);
+    regs->cc = cpu_to_le32((ctz32(NVME_CQ_ENTRY_BYTES) << CC_IOCQES_SHIFT) |
+                           (ctz32(NVME_SQ_ENTRY_BYTES) << CC_IOSQES_SHIFT) |
+                           CC_EN_MASK);
     /* Wait for CSTS.RDY = 1. */
     now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
     deadline = now + timeout_ms * 1000000;
-    while (!(le32_to_cpu(regs->csts) & 0x1)) {
+    while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to start (%"
                              PRId64 " ms)",
--
2.26.2
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Use self-explicit SCALE_MS definition instead of magic value
(missed in similar commit e4f310fe7f5).

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200922083821.578519-7-philmd@redhat.com>
---
 block/nvme.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
                             CC_EN_MASK);
     /* Wait for CSTS.RDY = 1. */
     now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-    deadline = now + timeout_ms * 1000000;
+    deadline = now + timeout_ms * SCALE_MS;
     while (!NVME_CSTS_RDY(le32_to_cpu(regs->csts))) {
         if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) > deadline) {
             error_setg(errp, "Timeout while waiting for device to start (%"
--
2.26.2
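[For context, a sketch of the arithmetic: SCALE_MS comes from "qemu/timer.h"
and is the number of nanoseconds per millisecond, so the replacement is
purely cosmetic.]

    /* QEMU_CLOCK_REALTIME is read in nanoseconds; SCALE_MS == 1000000. */
    int64_t now      = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    int64_t deadline = now + timeout_ms * SCALE_MS;  /* == timeout_ms * 1000000 */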
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

This is the only coroutine wrapper from block.c and block/io.c which
doesn't return a value, so let's convert it to the common behavior, to
simplify moving to generated coroutine wrappers in a further commit.

Also, bdrv_invalidate_cache is a void function, returning error only
through **errp parameter, which is considered to be bad practice, as
it forces callers to define and propagate local_err variable, so
conversion is good anyway.

This patch leaves the conversion of .bdrv_co_invalidate_cache() driver
callbacks and bdrv_invalidate_cache_all() for another day.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200924185414.28642-2-vsementsov@virtuozzo.com>
---
 include/block/block.h |  2 +-
 block.c               | 32 ++++++++++++++++++--------------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/include/block/block.h b/include/block/block.h
index XXXXXXX..XXXXXXX 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel_async(BlockAIOCB *acb);
 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

 /* Invalidate any cached metadata used by image formats */
-void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
+int bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
 void bdrv_invalidate_cache_all(Error **errp);
 int bdrv_inactivate_all(void);

diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@ void bdrv_init_with_whitelist(void)
     bdrv_init();
 }

-static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
-                                                  Error **errp)
+static int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
+                                                 Error **errp)
 {
     BdrvChild *child, *parent;
     uint64_t perm, shared_perm;
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
     BdrvDirtyBitmap *bm;

     if (!bs->drv) {
-        return;
+        return -ENOMEDIUM;
     }

     QLIST_FOREACH(child, &bs->children, next) {
         bdrv_co_invalidate_cache(child->bs, &local_err);
         if (local_err) {
             error_propagate(errp, local_err);
-            return;
+            return -EINVAL;
         }
     }

@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
     ret = bdrv_check_perm(bs, NULL, perm, shared_perm, NULL, NULL, errp);
     if (ret < 0) {
         bs->open_flags |= BDRV_O_INACTIVE;
-        return;
+        return ret;
     }
     bdrv_set_perm(bs, perm, shared_perm);

@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
         if (local_err) {
             bs->open_flags |= BDRV_O_INACTIVE;
             error_propagate(errp, local_err);
-            return;
+            return -EINVAL;
         }
     }

@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
     if (ret < 0) {
         bs->open_flags |= BDRV_O_INACTIVE;
         error_setg_errno(errp, -ret, "Could not refresh total sector count");
-        return;
+        return ret;
     }

@@ -XXX,XX +XXX,XX @@ static void coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
         if (local_err) {
             bs->open_flags |= BDRV_O_INACTIVE;
             error_propagate(errp, local_err);
-            return;
+            return -EINVAL;
         }
     }
+
+    return 0;
 }

 typedef struct InvalidateCacheCo {
     BlockDriverState *bs;
     Error **errp;
     bool done;
+    int ret;
 } InvalidateCacheCo;

 static void coroutine_fn bdrv_invalidate_cache_co_entry(void *opaque)
 {
     InvalidateCacheCo *ico = opaque;
-    bdrv_co_invalidate_cache(ico->bs, ico->errp);
+    ico->ret = bdrv_co_invalidate_cache(ico->bs, ico->errp);
     ico->done = true;
     aio_wait_kick();
 }

-void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
+int bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
 {
     Coroutine *co;
     InvalidateCacheCo ico = {
@@ -XXX,XX +XXX,XX @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
         bdrv_coroutine_enter(bs, co);
         BDRV_POLL_WHILE(bs, !ico.done);
     }
+
+    return ico.ret;
 }

 void bdrv_invalidate_cache_all(Error **errp)
 {
     BlockDriverState *bs;
-    Error *local_err = NULL;
     BdrvNextIterator it;

     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         AioContext *aio_context = bdrv_get_aio_context(bs);
+        int ret;

         aio_context_acquire(aio_context);
-        bdrv_invalidate_cache(bs, &local_err);
+        ret = bdrv_invalidate_cache(bs, errp);
         aio_context_release(aio_context);
-        if (local_err) {
-            error_propagate(errp, local_err);
+        if (ret < 0) {
             bdrv_next_cleanup(&it);
             return;
         }
--
2.26.2
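[A sketch of the caller-side simplification this conversion enables; the
caller shown is hypothetical, not taken from the patch.]

    /* Before: a void function reporting errors only via **errp forces the
     * caller to allocate a local Error just to detect failure. */
    Error *local_err = NULL;
    bdrv_invalidate_cache(bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }

    /* After: the return code can be checked directly and errp passed through. */
    int ret = bdrv_invalidate_cache(bs, errp);
    if (ret < 0) {
        return ret;
    }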
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

Most of our coroutine wrappers already follow this convention:

We have 'coroutine_fn bdrv_co_<something>(<normal argument list>)' as
the core function, and a wrapper 'bdrv_<something>(<same argument
list>)' which does parameter packing and calls bdrv_run_co().

The only outsiders are the bdrv_prwv_co and
bdrv_common_block_status_above wrappers. Let's refactor them to behave
as the others, it simplifies further conversion of coroutine wrappers.

This patch adds an indirection layer, but it will be compensated by
a further commit, which will drop bdrv_co_prwv together with the
is_write logic, to keep the read and write paths separate.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200924185414.28642-3-vsementsov@virtuozzo.com>
---
 block/io.c | 60 +++++++++++++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 28 deletions(-)

diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ typedef struct RwCo {
     BdrvRequestFlags flags;
 } RwCo;

+static int coroutine_fn bdrv_co_prwv(BdrvChild *child, int64_t offset,
+                                     QEMUIOVector *qiov, bool is_write,
+                                     BdrvRequestFlags flags)
+{
+    if (is_write) {
+        return bdrv_co_pwritev(child, offset, qiov->size, qiov, flags);
+    } else {
+        return bdrv_co_preadv(child, offset, qiov->size, qiov, flags);
+    }
+}
+
 static int coroutine_fn bdrv_rw_co_entry(void *opaque)
 {
     RwCo *rwco = opaque;

-    if (!rwco->is_write) {
-        return bdrv_co_preadv(rwco->child, rwco->offset,
-                              rwco->qiov->size, rwco->qiov,
-                              rwco->flags);
-    } else {
-        return bdrv_co_pwritev(rwco->child, rwco->offset,
-                               rwco->qiov->size, rwco->qiov,
-                               rwco->flags);
-    }
+    return bdrv_co_prwv(rwco->child, rwco->offset, rwco->qiov,
+                        rwco->is_write, rwco->flags);
 }

 /*
  * Process a vectored synchronous request using coroutines
  */
-static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
-                        QEMUIOVector *qiov, bool is_write,
-                        BdrvRequestFlags flags)
+static int bdrv_prwv(BdrvChild *child, int64_t offset,
+                     QEMUIOVector *qiov, bool is_write,
+                     BdrvRequestFlags flags)
 {
     RwCo rwco = {
         .child = child,
@@ -XXX,XX +XXX,XX @@ int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
 {
     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

-    return bdrv_prwv_co(child, offset, &qiov, true,
-                        BDRV_REQ_ZERO_WRITE | flags);
+    return bdrv_prwv(child, offset, &qiov, true, BDRV_REQ_ZERO_WRITE | flags);
 }

 /*
@@ -XXX,XX +XXX,XX @@ int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
 {
     int ret;

-    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
+    ret = bdrv_prwv(child, offset, qiov, false, 0);
     if (ret < 0) {
         return ret;
     }
@@ -XXX,XX +XXX,XX @@ int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
 {
     int ret;

-    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
+    ret = bdrv_prwv(child, offset, qiov, true, 0);
     if (ret < 0) {
         return ret;
     }
@@ -XXX,XX +XXX,XX @@ early_out:
     return ret;
 }

-static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
-                                                   BlockDriverState *base,
-                                                   bool want_zero,
-                                                   int64_t offset,
-                                                   int64_t bytes,
-                                                   int64_t *pnum,
-                                                   int64_t *map,
-                                                   BlockDriverState **file)
+static int coroutine_fn
+bdrv_co_common_block_status_above(BlockDriverState *bs,
+                                  BlockDriverState *base,
+                                  bool want_zero,
+                                  int64_t offset,
+                                  int64_t bytes,
+                                  int64_t *pnum,
+                                  int64_t *map,
+                                  BlockDriverState **file)
 {
     BlockDriverState *p;
     int ret = 0;
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
 {
     BdrvCoBlockStatusData *data = opaque;

-    return bdrv_co_block_status_above(data->bs, data->base,
-                                      data->want_zero,
-                                      data->offset, data->bytes,
-                                      data->pnum, data->map, data->file);
+    return bdrv_co_common_block_status_above(data->bs, data->base,
+                                             data->want_zero,
+                                             data->offset, data->bytes,
+                                             data->pnum, data->map, data->file);
 }

 /*
--
2.26.2
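[For readers new to these wrappers: a simplified sketch of the hand-written
coroutine-wrapper pattern being standardized here and generated automatically
later in the series. The ExampleCo/example_* names are hypothetical; the real
helpers live in block/io.c and also handle being called from inside a
coroutine, which the sketch omits.]

    /* Pack arguments, run the coroutine, poll until it finishes. */
    typedef struct ExampleCo {
        BdrvChild *child;
        int64_t offset;
        QEMUIOVector *qiov;
        int ret;
        bool done;
    } ExampleCo;

    static void coroutine_fn example_co_entry(void *opaque)
    {
        ExampleCo *c = opaque;

        c->ret = bdrv_co_preadv(c->child, c->offset, c->qiov->size, c->qiov, 0);
        c->done = true;
        aio_wait_kick();
    }

    static int example_preadv(BdrvChild *child, int64_t offset,
                              QEMUIOVector *qiov)
    {
        ExampleCo c = { .child = child, .offset = offset, .qiov = qiov };
        Coroutine *co = qemu_coroutine_create(example_co_entry, &c);

        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, !c.done);
        return c.ret;
    }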
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

We are going to keep coroutine-wrappers code (structure-packing
parameters, BDRV_POLL wrapper functions) in separate auto-generated
files. So, we'll need a header with declaration of original _co_
functions, for those which are static now. As well, we'll need
declarations for wrapper functions. Do these declarations now, as a
preparation step.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20200924185414.28642-4-vsementsov@virtuozzo.com>
---
 block/coroutines.h | 67 ++++++++++++++++++++++++++++++++++++++++++++++
 block.c            |  8 +++---
 block/io.c         | 34 +++++++++++------------
 3 files changed, 88 insertions(+), 21 deletions(-)
 create mode 100644 block/coroutines.h

diff --git a/block/coroutines.h b/block/coroutines.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/block/coroutines.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Block layer I/O functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef BLOCK_COROUTINES_INT_H
+#define BLOCK_COROUTINES_INT_H
+
+#include "block/block_int.h"
+
+int coroutine_fn bdrv_co_check(BlockDriverState *bs,
+                               BdrvCheckResult *res, BdrvCheckMode fix);
+int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
+
+int coroutine_fn
+bdrv_co_prwv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov,
+             bool is_write, BdrvRequestFlags flags);
+int
+bdrv_prwv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov,
+          bool is_write, BdrvRequestFlags flags);
+
+int coroutine_fn
+bdrv_co_common_block_status_above(BlockDriverState *bs,
+                                  BlockDriverState *base,
+                                  bool want_zero,
+                                  int64_t offset,
+                                  int64_t bytes,
+                                  int64_t *pnum,
+                                  int64_t *map,
+                                  BlockDriverState **file);
+int
+bdrv_common_block_status_above(BlockDriverState *bs,
+                               BlockDriverState *base,
+                               bool want_zero,
+                               int64_t offset,
+                               int64_t bytes,
+                               int64_t *pnum,
+                               int64_t *map,
+                               BlockDriverState **file);
+
+int coroutine_fn
+bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
+                   bool is_read);
+int
+bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
+                bool is_read);
+
+#endif /* BLOCK_COROUTINES_INT_H */
diff --git a/block.c b/block.c
index XXXXXXX..XXXXXXX 100644
--- a/block.c
+++ b/block.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/timer.h"
 #include "qemu/cutils.h"
 #include "qemu/id.h"
+#include "block/coroutines.h"

 #ifdef CONFIG_BSD
 #include <sys/ioctl.h>
@@ -XXX,XX +XXX,XX @@ static void bdrv_delete(BlockDriverState *bs)
  * free of errors) or -errno when an internal error occurred. The results of the
  * check are stored in res.
  */
-static int coroutine_fn bdrv_co_check(BlockDriverState *bs,
-                                      BdrvCheckResult *res, BdrvCheckMode fix)
+int coroutine_fn bdrv_co_check(BlockDriverState *bs,
+                               BdrvCheckResult *res, BdrvCheckMode fix)
 {
     if (bs->drv == NULL) {
         return -ENOMEDIUM;
@@ -XXX,XX +XXX,XX @@ void bdrv_init_with_whitelist(void)
     bdrv_init();
 }

-static int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs,
-                                                 Error **errp)
+int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
 {
     BdrvChild *child, *parent;
     uint64_t perm, shared_perm;
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@
 #include "block/blockjob.h"
 #include "block/blockjob_int.h"
 #include "block/block_int.h"
+#include "block/coroutines.h"
 #include "qemu/cutils.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
@@ -XXX,XX +XXX,XX @@ typedef struct RwCo {
     BdrvRequestFlags flags;
 } RwCo;

-static int coroutine_fn bdrv_co_prwv(BdrvChild *child, int64_t offset,
-                                     QEMUIOVector *qiov, bool is_write,
-                                     BdrvRequestFlags flags)
+int coroutine_fn bdrv_co_prwv(BdrvChild *child, int64_t offset,
+                              QEMUIOVector *qiov, bool is_write,
+                              BdrvRequestFlags flags)
 {
     if (is_write) {
         return bdrv_co_pwritev(child, offset, qiov->size, qiov, flags);
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_rw_co_entry(void *opaque)
 /*
  * Process a vectored synchronous request using coroutines
  */
-static int bdrv_prwv(BdrvChild *child, int64_t offset,
-                     QEMUIOVector *qiov, bool is_write,
-                     BdrvRequestFlags flags)
+int bdrv_prwv(BdrvChild *child, int64_t offset,
+              QEMUIOVector *qiov, bool is_write,
+              BdrvRequestFlags flags)
 {
     RwCo rwco = {
         .child = child,
@@ -XXX,XX +XXX,XX @@ early_out:
     return ret;
 }

-static int coroutine_fn
+int coroutine_fn
 bdrv_co_common_block_status_above(BlockDriverState *bs,
                                   BlockDriverState *base,
                                   bool want_zero,
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
  *
  * See bdrv_co_block_status_above() for details.
  */
-static int bdrv_common_block_status_above(BlockDriverState *bs,
-                                          BlockDriverState *base,
-                                          bool want_zero, int64_t offset,
-                                          int64_t bytes, int64_t *pnum,
-                                          int64_t *map,
-                                          BlockDriverState **file)
+int bdrv_common_block_status_above(BlockDriverState *bs,
+                                   BlockDriverState *base,
+                                   bool want_zero, int64_t offset,
+                                   int64_t bytes, int64_t *pnum,
+                                   int64_t *map,
+                                   BlockDriverState **file)
 {
     BdrvCoBlockStatusData data = {
         .bs = bs,
@@ -XXX,XX +XXX,XX @@ typedef struct BdrvVmstateCo {
     bool is_read;
 } BdrvVmstateCo;

-static int coroutine_fn
+int coroutine_fn
 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
                    bool is_read)
 {
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
     return bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
 }

-static inline int
-bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
-                bool is_read)
+int bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
+                    bool is_read)
 {
     BdrvVmstateCo data = {
         .bs = bs,
--
2.26.2
New patch
1
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>

We have a very frequent pattern of creating a coroutine from a function
with several arguments:

- create a structure to pack parameters
- create _entry function to call original function taking parameters
  from struct
- do different magic to handle completion: set ret to NOT_DONE or
  EINPROGRESS or use separate bool field
- fill the struct and create coroutine from _entry function with this
  struct as a parameter
- do coroutine enter and BDRV_POLL_WHILE loop

Let's reduce code duplication by generating coroutine wrappers.

This patch adds scripts/block-coroutine-wrapper.py together with some
friends, which will generate functions with declared prototypes marked
by the 'generated_co_wrapper' specifier.

The usage of the new code generation is as follows:

1. define the coroutine function somewhere

   int coroutine_fn bdrv_co_NAME(...) {...}

2. declare in some header file

   int generated_co_wrapper bdrv_NAME(...);

   with the same list of parameters (generated_co_wrapper is
   defined in "include/block/block.h").

3. Make sure the block_gen_c declaration in block/meson.build
   mentions the file with your marker function.

Still, no function is marked yet; that work is done in the following
commit.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Message-Id: <20200924185414.28642-5-vsementsov@virtuozzo.com>
[Added encoding='utf-8' to open() calls as requested by Vladimir. Fixed
typo and grammar issues pointed out by Eric Blake. Removed clang-format
dependency that caused build test issues.
--Stefan]
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/block-gen.h                      |  49 ++++++++
 include/block/block.h                  |  10 ++
 block/meson.build                      |   8 ++
 docs/devel/block-coroutine-wrapper.rst |  54 ++++++++
 docs/devel/index.rst                   |   1 +
 scripts/block-coroutine-wrapper.py     | 167 +++++++++++++++++++++++++
 6 files changed, 289 insertions(+)
 create mode 100644 block/block-gen.h
 create mode 100644 docs/devel/block-coroutine-wrapper.rst
 create mode 100644 scripts/block-coroutine-wrapper.py

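
For a concrete picture of the boilerplate listed above, here is a minimal
sketch of the kind of hand-written wrapper that this series generates
instead. The names bdrv_foo()/bdrv_co_foo() and FooCo are made up for
illustration; the shape follows the hand-written wrappers that a later
patch in this series removes from block.c and block/io.c:

    typedef struct FooCo {
        BlockDriverState *bs;   /* parameters packed into a struct */
        int flags;
        int ret;
        bool done;              /* completion flag for the poll loop */
    } FooCo;

    static void coroutine_fn bdrv_foo_co_entry(void *opaque)
    {
        FooCo *fooco = opaque;

        fooco->ret = bdrv_co_foo(fooco->bs, fooco->flags);
        fooco->done = true;
        aio_wait_kick();
    }

    int bdrv_foo(BlockDriverState *bs, int flags)
    {
        FooCo fooco = {
            .bs = bs,
            .flags = flags,
            .done = false,
        };

        if (qemu_in_coroutine()) {
            /* fast path if we are already in coroutine context */
            bdrv_foo_co_entry(&fooco);
        } else {
            Coroutine *co = qemu_coroutine_create(bdrv_foo_co_entry, &fooco);
            bdrv_coroutine_enter(bs, co);
            BDRV_POLL_WHILE(bs, !fooco.done);
        }

        return fooco.ret;
    }
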
diff --git a/block/block-gen.h b/block/block-gen.h
61
new file mode 100644
62
index XXXXXXX..XXXXXXX
63
--- /dev/null
64
+++ b/block/block-gen.h
65
@@ -XXX,XX +XXX,XX @@
66
+/*
67
+ * Block coroutine wrapping core, used by auto-generated block/block-gen.c
68
+ *
69
+ * Copyright (c) 2003 Fabrice Bellard
70
+ * Copyright (c) 2020 Virtuozzo International GmbH
71
+ *
72
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
73
+ * of this software and associated documentation files (the "Software"), to deal
74
+ * in the Software without restriction, including without limitation the rights
75
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
76
+ * copies of the Software, and to permit persons to whom the Software is
77
+ * furnished to do so, subject to the following conditions:
78
+ *
79
+ * The above copyright notice and this permission notice shall be included in
80
+ * all copies or substantial portions of the Software.
81
+ *
82
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
83
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
84
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
85
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
86
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
87
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
88
+ * THE SOFTWARE.
89
+ */
90
+
91
+#ifndef BLOCK_BLOCK_GEN_H
92
+#define BLOCK_BLOCK_GEN_H
93
+
94
+#include "block/block_int.h"
95
+
96
+/* Base structure for argument packing structures */
97
+typedef struct BdrvPollCo {
98
+ BlockDriverState *bs;
99
+ bool in_progress;
100
+ int ret;
101
+ Coroutine *co; /* Keep pointer here for debugging */
102
+} BdrvPollCo;
103
+
104
+static inline int bdrv_poll_co(BdrvPollCo *s)
105
+{
106
+ assert(!qemu_in_coroutine());
107
+
108
+ bdrv_coroutine_enter(s->bs, s->co);
109
+ BDRV_POLL_WHILE(s->bs, s->in_progress);
110
+
111
+ return s->ret;
112
+}
113
+
114
+#endif /* BLOCK_BLOCK_GEN_H */
115
diff --git a/include/block/block.h b/include/block/block.h
116
index XXXXXXX..XXXXXXX 100644
117
--- a/include/block/block.h
118
+++ b/include/block/block.h
119
@@ -XXX,XX +XXX,XX @@
120
#include "block/blockjob.h"
121
#include "qemu/hbitmap.h"
122
123
+/*
124
+ * generated_co_wrapper
125
+ *
126
+ * Function specifier, which does nothing but mark functions to be
127
+ * generated by scripts/block-coroutine-wrapper.py
128
+ *
129
+ * Read more in docs/devel/block-coroutine-wrapper.rst
130
+ */
131
+#define generated_co_wrapper
132
+
133
/* block.c */
134
typedef struct BlockDriver BlockDriver;
135
typedef struct BdrvChild BdrvChild;
136
diff --git a/block/meson.build b/block/meson.build
137
index XXXXXXX..XXXXXXX 100644
138
--- a/block/meson.build
139
+++ b/block/meson.build
140
@@ -XXX,XX +XXX,XX @@ module_block_h = custom_target('module_block.h',
141
command: [module_block_py, '@OUTPUT0@', modsrc])
142
block_ss.add(module_block_h)
143
144
+wrapper_py = find_program('../scripts/block-coroutine-wrapper.py')
145
+block_gen_c = custom_target('block-gen.c',
146
+ output: 'block-gen.c',
147
+ input: files('../include/block/block.h',
148
+ 'coroutines.h'),
149
+ command: [wrapper_py, '@OUTPUT@', '@INPUT@'])
150
+block_ss.add(block_gen_c)
151
+
152
block_ss.add(files('stream.c'))
153
154
softmmu_ss.add(files('qapi-sysemu.c'))
155
diff --git a/docs/devel/block-coroutine-wrapper.rst b/docs/devel/block-coroutine-wrapper.rst
156
new file mode 100644
157
index XXXXXXX..XXXXXXX
158
--- /dev/null
159
+++ b/docs/devel/block-coroutine-wrapper.rst
160
@@ -XXX,XX +XXX,XX @@
161
+=======================
162
+block-coroutine-wrapper
163
+=======================
164
+
165
+A lot of functions in QEMU block layer (see ``block/*``) can only be
166
+called in coroutine context. Such functions are normally marked by the
167
+coroutine_fn specifier. Still, sometimes we need to call them from
168
+non-coroutine context; for this we need to start a coroutine, run the
169
+needed function from it and wait for the coroutine to finish in a
170
+BDRV_POLL_WHILE() loop. To run a coroutine we need a function with one
171
+void* argument. So for each coroutine_fn function which needs a
172
+non-coroutine interface, we should define a structure to pack the
173
+parameters, define a separate function to unpack the parameters and
174
+call the original function and finally define a new interface function
175
+with same list of arguments as original one, which will pack the
176
+parameters into a struct, create a coroutine, run it and wait in
177
+BDRV_POLL_WHILE() loop. It's boring to create such wrappers by hand,
178
+so we have a script to generate them.
179
+
180
+Usage
181
+=====
182
+
183
+Assume we have defined the ``coroutine_fn`` function
184
+``bdrv_co_foo(<some args>)`` and need a non-coroutine interface for it,
185
+called ``bdrv_foo(<same args>)``. In this case the script can help. To
186
+trigger the generation:
187
+
188
+1. You need ``bdrv_foo`` declaration somewhere (for example, in
189
+ ``block/coroutines.h``) with the ``generated_co_wrapper`` mark,
190
+ like this:
191
+
192
+.. code-block:: c
193
+
194
+ int generated_co_wrapper bdrv_foo(<some args>);
195
+
196
+2. You need to feed this declaration to block-coroutine-wrapper script.
197
+ For this, add the .h (or .c) file with the declaration to the
198
+ ``input: files(...)`` list of ``block_gen_c`` target declaration in
199
+ ``block/meson.build``
200
+
201
+You are done. During the build, coroutine wrappers will be generated in
202
+``<BUILD_DIR>/block/block-gen.c``.
203
+
204
+Links
205
+=====
206
+
207
+1. The script location is ``scripts/block-coroutine-wrapper.py``.
208
+
209
+2. Generic place for private ``generated_co_wrapper`` declarations is
210
+ ``block/coroutines.h``, for public declarations:
211
+ ``include/block/block.h``
212
+
213
+3. The core API of generated coroutine wrappers is placed in
214
+ (not generated) ``block/block-gen.h``
215
diff --git a/docs/devel/index.rst b/docs/devel/index.rst
216
index XXXXXXX..XXXXXXX 100644
217
--- a/docs/devel/index.rst
218
+++ b/docs/devel/index.rst
219
@@ -XXX,XX +XXX,XX @@ Contents:
220
s390-dasd-ipl
221
clocks
222
qom
223
+ block-coroutine-wrapper
224
diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py
225
new file mode 100644
226
index XXXXXXX..XXXXXXX
227
--- /dev/null
228
+++ b/scripts/block-coroutine-wrapper.py
229
@@ -XXX,XX +XXX,XX @@
230
+#! /usr/bin/env python3
231
+"""Generate coroutine wrappers for block subsystem.
232
+
233
+The program parses one or several concatenated C files passed as arguments,
234
+searches for functions with the 'generated_co_wrapper' specifier
235
+and generates the corresponding wrappers in the output file.
236
+
237
+Usage: block-coroutine-wrapper.py generated-file.c FILE.[ch]...
238
+
239
+Copyright (c) 2020 Virtuozzo International GmbH.
240
+
241
+This program is free software; you can redistribute it and/or modify
242
+it under the terms of the GNU General Public License as published by
243
+the Free Software Foundation; either version 2 of the License, or
244
+(at your option) any later version.
245
+
246
+This program is distributed in the hope that it will be useful,
247
+but WITHOUT ANY WARRANTY; without even the implied warranty of
248
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
249
+GNU General Public License for more details.
250
+
251
+You should have received a copy of the GNU General Public License
252
+along with this program. If not, see <http://www.gnu.org/licenses/>.
253
+"""
254
+
255
+import sys
256
+import re
257
+from typing import Iterator
258
+
259
+
260
+def gen_header():
261
+ copyright = re.sub('^.*Copyright', 'Copyright', __doc__, flags=re.DOTALL)
262
+ copyright = re.sub('^(?=.)', ' * ', copyright.strip(), flags=re.MULTILINE)
263
+ copyright = re.sub('^$', ' *', copyright, flags=re.MULTILINE)
264
+ return f"""\
265
+/*
266
+ * File is generated by scripts/block-coroutine-wrapper.py
267
+ *
268
+{copyright}
269
+ */
270
+
271
+#include "qemu/osdep.h"
272
+#include "block/coroutines.h"
273
+#include "block/block-gen.h"
274
+#include "block/block_int.h"\
275
+"""
276
+
277
+
278
+class ParamDecl:
279
+ param_re = re.compile(r'(?P<decl>'
280
+ r'(?P<type>.*[ *])'
281
+ r'(?P<name>[a-z][a-z0-9_]*)'
282
+ r')')
283
+
284
+ def __init__(self, param_decl: str) -> None:
285
+ m = self.param_re.match(param_decl.strip())
286
+ if m is None:
287
+ raise ValueError(f'Wrong parameter declaration: "{param_decl}"')
288
+ self.decl = m.group('decl')
289
+ self.type = m.group('type')
290
+ self.name = m.group('name')
291
+
292
+
293
+class FuncDecl:
294
+ def __init__(self, return_type: str, name: str, args: str) -> None:
295
+ self.return_type = return_type.strip()
296
+ self.name = name.strip()
297
+ self.args = [ParamDecl(arg.strip()) for arg in args.split(',')]
298
+
299
+ def gen_list(self, format: str) -> str:
300
+ return ', '.join(format.format_map(arg.__dict__) for arg in self.args)
301
+
302
+ def gen_block(self, format: str) -> str:
303
+ return '\n'.join(format.format_map(arg.__dict__) for arg in self.args)
304
+
305
+
306
+# Match wrappers declared with a generated_co_wrapper mark
307
+func_decl_re = re.compile(r'^int\s*generated_co_wrapper\s*'
308
+ r'(?P<wrapper_name>[a-z][a-z0-9_]*)'
309
+ r'\((?P<args>[^)]*)\);$', re.MULTILINE)
310
+
311
+
312
+def func_decl_iter(text: str) -> Iterator:
313
+ for m in func_decl_re.finditer(text):
314
+ yield FuncDecl(return_type='int',
315
+ name=m.group('wrapper_name'),
316
+ args=m.group('args'))
317
+
318
+
319
+def snake_to_camel(func_name: str) -> str:
320
+ """
321
+ Convert underscore names like 'some_function_name' to camel-case like
322
+ 'SomeFunctionName'
323
+ """
324
+ words = func_name.split('_')
325
+ words = [w[0].upper() + w[1:] for w in words]
326
+ return ''.join(words)
327
+
328
+
329
+def gen_wrapper(func: FuncDecl) -> str:
330
+ assert func.name.startswith('bdrv_')
331
+ assert not func.name.startswith('bdrv_co_')
332
+ assert func.return_type == 'int'
333
+ assert func.args[0].type in ['BlockDriverState *', 'BdrvChild *']
334
+
335
+ name = 'bdrv_co_' + func.name[5:]
336
+ bs = 'bs' if func.args[0].type == 'BlockDriverState *' else 'child->bs'
337
+ struct_name = snake_to_camel(name)
338
+
339
+ return f"""\
340
+/*
341
+ * Wrappers for {name}
342
+ */
343
+
344
+typedef struct {struct_name} {{
345
+ BdrvPollCo poll_state;
346
+{ func.gen_block(' {decl};') }
347
+}} {struct_name};
348
+
349
+static void coroutine_fn {name}_entry(void *opaque)
350
+{{
351
+ {struct_name} *s = opaque;
352
+
353
+ s->poll_state.ret = {name}({ func.gen_list('s->{name}') });
354
+ s->poll_state.in_progress = false;
355
+
356
+ aio_wait_kick();
357
+}}
358
+
359
+int {func.name}({ func.gen_list('{decl}') })
360
+{{
361
+ if (qemu_in_coroutine()) {{
362
+ return {name}({ func.gen_list('{name}') });
363
+ }} else {{
364
+ {struct_name} s = {{
365
+ .poll_state.bs = {bs},
366
+ .poll_state.in_progress = true,
367
+
368
+{ func.gen_block(' .{name} = {name},') }
369
+ }};
370
+
371
+ s.poll_state.co = qemu_coroutine_create({name}_entry, &s);
372
+
373
+ return bdrv_poll_co(&s.poll_state);
374
+ }}
375
+}}"""
376
+
377
+
378
+def gen_wrappers(input_code: str) -> str:
379
+ res = ''
380
+ for func in func_decl_iter(input_code):
381
+ res += '\n\n\n'
382
+ res += gen_wrapper(func)
383
+
384
+ return res
385
+
386
+
387
+if __name__ == '__main__':
388
+ if len(sys.argv) < 3:
389
+ exit(f'Usage: {sys.argv[0]} OUT_FILE.c IN_FILE.[ch]...')
390
+
391
+ with open(sys.argv[1], 'w', encoding='utf-8') as f_out:
392
+ f_out.write(gen_header())
393
+ for fname in sys.argv[2:]:
394
+ with open(fname, encoding='utf-8') as f_in:
395
+ f_out.write(gen_wrappers(f_in.read()))
396
+ f_out.write('\n')
397
--
398
2.26.2
399
diff view generated by jsdifflib
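
To give an idea of the generator's output, this is roughly what would be
emitted into <BUILD_DIR>/block/block-gen.c for a hypothetical declaration
"int generated_co_wrapper bdrv_foo(BlockDriverState *bs, QEMUIOVector *qiov);".
It is a sketch instantiated by hand from the gen_wrapper() template above,
not taken from a real build:

    /*
     * Wrappers for bdrv_co_foo
     */

    typedef struct BdrvCoFoo {
        BdrvPollCo poll_state;
        BlockDriverState *bs;
        QEMUIOVector *qiov;
    } BdrvCoFoo;

    static void coroutine_fn bdrv_co_foo_entry(void *opaque)
    {
        BdrvCoFoo *s = opaque;

        s->poll_state.ret = bdrv_co_foo(s->bs, s->qiov);
        s->poll_state.in_progress = false;

        aio_wait_kick();
    }

    int bdrv_foo(BlockDriverState *bs, QEMUIOVector *qiov)
    {
        if (qemu_in_coroutine()) {
            return bdrv_co_foo(bs, qiov);
        } else {
            BdrvCoFoo s = {
                .poll_state.bs = bs,
                .poll_state.in_progress = true,

                .bs = bs,
                .qiov = qiov,
            };

            s.poll_state.co = qemu_coroutine_create(bdrv_co_foo_entry, &s);

            return bdrv_poll_co(&s.poll_state);
        }
    }

The in_progress/ret bookkeeping lives in the shared BdrvPollCo helper from
block/block-gen.h, so each generated wrapper stays small.
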
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Now that the job infrastructure is handling the job_completed call for
3
Use the code generation implemented in the previous commit to generate
4
all implemented jobs, we can remove the interface that allowed jobs to
4
coroutine wrappers in block.c and block/io.c
5
schedule their own completion.
6
5
7
Signed-off-by: John Snow <jsnow@redhat.com>
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20180830015734.19765-10-jsnow@redhat.com
8
Reviewed-by: Eric Blake <eblake@redhat.com>
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Message-Id: <20200924185414.28642-6-vsementsov@virtuozzo.com>
11
---
11
---
12
include/qemu/job.h | 17 -----------------
12
block/coroutines.h | 6 +-
13
job.c | 40 ++--------------------------------------
13
include/block/block.h | 16 ++--
14
2 files changed, 2 insertions(+), 55 deletions(-)
14
block.c | 73 ---------------
15
block/io.c | 212 ------------------------------------------
16
4 files changed, 13 insertions(+), 294 deletions(-)
15
17
16
diff --git a/include/qemu/job.h b/include/qemu/job.h
18
diff --git a/block/coroutines.h b/block/coroutines.h
17
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
18
--- a/include/qemu/job.h
20
--- a/block/coroutines.h
19
+++ b/include/qemu/job.h
21
+++ b/block/coroutines.h
20
@@ -XXX,XX +XXX,XX @@ void job_finalize(Job *job, Error **errp);
22
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
21
*/
23
int coroutine_fn
22
void job_dismiss(Job **job, Error **errp);
24
bdrv_co_prwv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov,
23
25
bool is_write, BdrvRequestFlags flags);
24
-typedef void JobDeferToMainLoopFn(Job *job, void *opaque);
26
-int
25
-
27
+int generated_co_wrapper
26
-/**
28
bdrv_prwv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov,
27
- * @job: The job
29
bool is_write, BdrvRequestFlags flags);
28
- * @fn: The function to run in the main loop
30
29
- * @opaque: The opaque value that is passed to @fn
31
@@ -XXX,XX +XXX,XX @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
32
int64_t *pnum,
33
int64_t *map,
34
BlockDriverState **file);
35
-int
36
+int generated_co_wrapper
37
bdrv_common_block_status_above(BlockDriverState *bs,
38
BlockDriverState *base,
39
bool want_zero,
40
@@ -XXX,XX +XXX,XX @@ bdrv_common_block_status_above(BlockDriverState *bs,
41
int coroutine_fn
42
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
43
bool is_read);
44
-int
45
+int generated_co_wrapper
46
bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
47
bool is_read);
48
49
diff --git a/include/block/block.h b/include/block/block.h
50
index XXXXXXX..XXXXXXX 100644
51
--- a/include/block/block.h
52
+++ b/include/block/block.h
53
@@ -XXX,XX +XXX,XX @@ void bdrv_refresh_filename(BlockDriverState *bs);
54
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
55
PreallocMode prealloc, BdrvRequestFlags flags,
56
Error **errp);
57
-int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
58
- PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
59
+int generated_co_wrapper
60
+bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
61
+ PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);
62
63
int64_t bdrv_nb_sectors(BlockDriverState *bs);
64
int64_t bdrv_getlength(BlockDriverState *bs);
65
@@ -XXX,XX +XXX,XX @@ typedef enum {
66
BDRV_FIX_ERRORS = 2,
67
} BdrvCheckMode;
68
69
-int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
70
+int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res,
71
+ BdrvCheckMode fix);
72
73
/* The units of offset and total_work_size may be chosen arbitrarily by the
74
* block driver; total_work_size may change during the course of the amendment
75
@@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel_async(BlockAIOCB *acb);
76
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);
77
78
/* Invalidate any cached metadata used by image formats */
79
-int bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
80
+int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs,
81
+ Error **errp);
82
void bdrv_invalidate_cache_all(Error **errp);
83
int bdrv_inactivate_all(void);
84
85
/* Ensure contents are flushed to disk. */
86
-int bdrv_flush(BlockDriverState *bs);
87
+int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
88
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
89
int bdrv_flush_all(void);
90
void bdrv_close_all(void);
91
@@ -XXX,XX +XXX,XX @@ void bdrv_drain_all(void);
92
AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
93
cond); })
94
95
-int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
96
+int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
97
+ int64_t bytes);
98
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
99
int bdrv_has_zero_init_1(BlockDriverState *bs);
100
int bdrv_has_zero_init(BlockDriverState *bs);
101
diff --git a/block.c b/block.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/block.c
104
+++ b/block.c
105
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_check(BlockDriverState *bs,
106
return bs->drv->bdrv_co_check(bs, res, fix);
107
}
108
109
-typedef struct CheckCo {
110
- BlockDriverState *bs;
111
- BdrvCheckResult *res;
112
- BdrvCheckMode fix;
113
- int ret;
114
-} CheckCo;
115
-
116
-static void coroutine_fn bdrv_check_co_entry(void *opaque)
117
-{
118
- CheckCo *cco = opaque;
119
- cco->ret = bdrv_co_check(cco->bs, cco->res, cco->fix);
120
- aio_wait_kick();
121
-}
122
-
123
-int bdrv_check(BlockDriverState *bs,
124
- BdrvCheckResult *res, BdrvCheckMode fix)
125
-{
126
- Coroutine *co;
127
- CheckCo cco = {
128
- .bs = bs,
129
- .res = res,
130
- .ret = -EINPROGRESS,
131
- .fix = fix,
132
- };
133
-
134
- if (qemu_in_coroutine()) {
135
- /* Fast-path if already in coroutine context */
136
- bdrv_check_co_entry(&cco);
137
- } else {
138
- co = qemu_coroutine_create(bdrv_check_co_entry, &cco);
139
- bdrv_coroutine_enter(bs, co);
140
- BDRV_POLL_WHILE(bs, cco.ret == -EINPROGRESS);
141
- }
142
-
143
- return cco.ret;
144
-}
145
-
146
/*
147
* Return values:
148
* 0 - success
149
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp)
150
return 0;
151
}
152
153
-typedef struct InvalidateCacheCo {
154
- BlockDriverState *bs;
155
- Error **errp;
156
- bool done;
157
- int ret;
158
-} InvalidateCacheCo;
159
-
160
-static void coroutine_fn bdrv_invalidate_cache_co_entry(void *opaque)
161
-{
162
- InvalidateCacheCo *ico = opaque;
163
- ico->ret = bdrv_co_invalidate_cache(ico->bs, ico->errp);
164
- ico->done = true;
165
- aio_wait_kick();
166
-}
167
-
168
-int bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
169
-{
170
- Coroutine *co;
171
- InvalidateCacheCo ico = {
172
- .bs = bs,
173
- .done = false,
174
- .errp = errp
175
- };
176
-
177
- if (qemu_in_coroutine()) {
178
- /* Fast-path if already in coroutine context */
179
- bdrv_invalidate_cache_co_entry(&ico);
180
- } else {
181
- co = qemu_coroutine_create(bdrv_invalidate_cache_co_entry, &ico);
182
- bdrv_coroutine_enter(bs, co);
183
- BDRV_POLL_WHILE(bs, !ico.done);
184
- }
185
-
186
- return ico.ret;
187
-}
188
-
189
void bdrv_invalidate_cache_all(Error **errp)
190
{
191
BlockDriverState *bs;
192
diff --git a/block/io.c b/block/io.c
193
index XXXXXXX..XXXXXXX 100644
194
--- a/block/io.c
195
+++ b/block/io.c
196
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
197
return 0;
198
}
199
200
-typedef int coroutine_fn BdrvRequestEntry(void *opaque);
201
-typedef struct BdrvRunCo {
202
- BdrvRequestEntry *entry;
203
- void *opaque;
204
- int ret;
205
- bool done;
206
- Coroutine *co; /* Coroutine, running bdrv_run_co_entry, for debugging */
207
-} BdrvRunCo;
208
-
209
-static void coroutine_fn bdrv_run_co_entry(void *opaque)
210
-{
211
- BdrvRunCo *arg = opaque;
212
-
213
- arg->ret = arg->entry(arg->opaque);
214
- arg->done = true;
215
- aio_wait_kick();
216
-}
217
-
218
-static int bdrv_run_co(BlockDriverState *bs, BdrvRequestEntry *entry,
219
- void *opaque)
220
-{
221
- if (qemu_in_coroutine()) {
222
- /* Fast-path if already in coroutine context */
223
- return entry(opaque);
224
- } else {
225
- BdrvRunCo s = { .entry = entry, .opaque = opaque };
226
-
227
- s.co = qemu_coroutine_create(bdrv_run_co_entry, &s);
228
- bdrv_coroutine_enter(bs, s.co);
229
-
230
- BDRV_POLL_WHILE(bs, !s.done);
231
-
232
- return s.ret;
233
- }
234
-}
235
-
236
-typedef struct RwCo {
237
- BdrvChild *child;
238
- int64_t offset;
239
- QEMUIOVector *qiov;
240
- bool is_write;
241
- BdrvRequestFlags flags;
242
-} RwCo;
243
-
244
int coroutine_fn bdrv_co_prwv(BdrvChild *child, int64_t offset,
245
QEMUIOVector *qiov, bool is_write,
246
BdrvRequestFlags flags)
247
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_prwv(BdrvChild *child, int64_t offset,
248
}
249
}
250
251
-static int coroutine_fn bdrv_rw_co_entry(void *opaque)
252
-{
253
- RwCo *rwco = opaque;
254
-
255
- return bdrv_co_prwv(rwco->child, rwco->offset, rwco->qiov,
256
- rwco->is_write, rwco->flags);
257
-}
258
-
259
-/*
260
- * Process a vectored synchronous request using coroutines
261
- */
262
-int bdrv_prwv(BdrvChild *child, int64_t offset,
263
- QEMUIOVector *qiov, bool is_write,
264
- BdrvRequestFlags flags)
265
-{
266
- RwCo rwco = {
267
- .child = child,
268
- .offset = offset,
269
- .qiov = qiov,
270
- .is_write = is_write,
271
- .flags = flags,
272
- };
273
-
274
- return bdrv_run_co(child->bs, bdrv_rw_co_entry, &rwco);
275
-}
276
-
277
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
278
int bytes, BdrvRequestFlags flags)
279
{
280
@@ -XXX,XX +XXX,XX @@ int bdrv_flush_all(void)
281
return result;
282
}
283
284
-
285
-typedef struct BdrvCoBlockStatusData {
286
- BlockDriverState *bs;
287
- BlockDriverState *base;
288
- bool want_zero;
289
- int64_t offset;
290
- int64_t bytes;
291
- int64_t *pnum;
292
- int64_t *map;
293
- BlockDriverState **file;
294
-} BdrvCoBlockStatusData;
295
-
296
/*
297
* Returns the allocation status of the specified sectors.
298
* Drivers not implementing the functionality are assumed to not support
299
@@ -XXX,XX +XXX,XX @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
300
return ret;
301
}
302
303
-/* Coroutine wrapper for bdrv_block_status_above() */
304
-static int coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
305
-{
306
- BdrvCoBlockStatusData *data = opaque;
307
-
308
- return bdrv_co_common_block_status_above(data->bs, data->base,
309
- data->want_zero,
310
- data->offset, data->bytes,
311
- data->pnum, data->map, data->file);
312
-}
313
-
314
-/*
315
- * Synchronous wrapper around bdrv_co_block_status_above().
30
- *
316
- *
31
- * This function must be called by the main job coroutine just before it
317
- * See bdrv_co_block_status_above() for details.
32
- * returns. @fn is executed in the main loop with the job AioContext acquired.
33
- *
34
- * Block jobs must call bdrv_unref(), bdrv_close(), and anything that uses
35
- * bdrv_drain_all() in the main loop.
36
- *
37
- * The @job AioContext is held while @fn executes.
38
- */
318
- */
39
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque);
319
-int bdrv_common_block_status_above(BlockDriverState *bs,
40
-
320
- BlockDriverState *base,
41
/**
321
- bool want_zero, int64_t offset,
42
* Synchronously finishes the given @job. If @finish is given, it is called to
322
- int64_t bytes, int64_t *pnum,
43
* trigger completion or cancellation of the job.
323
- int64_t *map,
44
diff --git a/job.c b/job.c
324
- BlockDriverState **file)
45
index XXXXXXX..XXXXXXX 100644
325
-{
46
--- a/job.c
326
- BdrvCoBlockStatusData data = {
47
+++ b/job.c
327
- .bs = bs,
48
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
328
- .base = base,
49
assert(job && job->driver && job->driver->run);
329
- .want_zero = want_zero,
50
job_pause_point(job);
330
- .offset = offset,
51
job->ret = job->driver->run(job, &job->err);
331
- .bytes = bytes,
52
- if (!job->deferred_to_main_loop) {
332
- .pnum = pnum,
53
- job->deferred_to_main_loop = true;
333
- .map = map,
54
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
334
- .file = file,
55
- job_exit,
335
- };
56
- job);
336
-
57
- }
337
- return bdrv_run_co(bs, bdrv_block_status_above_co_entry, &data);
58
+ job->deferred_to_main_loop = true;
338
-}
59
+ aio_bh_schedule_oneshot(qemu_get_aio_context(), job_exit, job);
339
-
60
}
340
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
61
341
int64_t offset, int64_t bytes, int64_t *pnum,
62
342
int64_t *map, BlockDriverState **file)
63
@@ -XXX,XX +XXX,XX @@ void job_complete(Job *job, Error **errp)
343
@@ -XXX,XX +XXX,XX @@ int bdrv_is_allocated_above(BlockDriverState *top,
64
job->driver->complete(job, errp);
344
return 0;
65
}
345
}
66
346
67
-
347
-typedef struct BdrvVmstateCo {
68
-typedef struct {
348
- BlockDriverState *bs;
69
- Job *job;
349
- QEMUIOVector *qiov;
70
- JobDeferToMainLoopFn *fn;
350
- int64_t pos;
71
- void *opaque;
351
- bool is_read;
72
-} JobDeferToMainLoopData;
352
-} BdrvVmstateCo;
73
-
353
-
74
-static void job_defer_to_main_loop_bh(void *opaque)
354
int coroutine_fn
75
-{
355
bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
76
- JobDeferToMainLoopData *data = opaque;
356
bool is_read)
77
- Job *job = data->job;
357
@@ -XXX,XX +XXX,XX @@ bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
78
- AioContext *aio_context = job->aio_context;
358
return ret;
79
-
359
}
80
- aio_context_acquire(aio_context);
360
81
- data->fn(data->job, data->opaque);
361
-static int coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
82
- aio_context_release(aio_context);
362
-{
83
-
363
- BdrvVmstateCo *co = opaque;
84
- g_free(data);
364
-
85
-}
365
- return bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
86
-
366
-}
87
-void job_defer_to_main_loop(Job *job, JobDeferToMainLoopFn *fn, void *opaque)
367
-
88
-{
368
-int bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
89
- JobDeferToMainLoopData *data = g_malloc(sizeof(*data));
369
- bool is_read)
90
- data->job = job;
370
-{
91
- data->fn = fn;
371
- BdrvVmstateCo data = {
92
- data->opaque = opaque;
372
- .bs = bs,
93
- job->deferred_to_main_loop = true;
373
- .qiov = qiov,
94
-
374
- .pos = pos,
95
- aio_bh_schedule_oneshot(qemu_get_aio_context(),
375
- .is_read = is_read,
96
- job_defer_to_main_loop_bh, data);
376
- };
97
-}
377
-
98
-
378
- return bdrv_run_co(bs, bdrv_co_rw_vmstate_entry, &data);
99
int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp)
379
-}
100
{
380
-
101
Error *local_err = NULL;
381
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
382
int64_t pos, int size)
383
{
384
@@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel_async(BlockAIOCB *acb)
385
/**************************************************************/
386
/* Coroutine block device emulation */
387
388
-static int coroutine_fn bdrv_flush_co_entry(void *opaque)
389
-{
390
- return bdrv_co_flush(opaque);
391
-}
392
-
393
int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
394
{
395
BdrvChild *primary_child = bdrv_primary_child(bs);
396
@@ -XXX,XX +XXX,XX @@ early_exit:
397
return ret;
398
}
399
400
-int bdrv_flush(BlockDriverState *bs)
401
-{
402
- return bdrv_run_co(bs, bdrv_flush_co_entry, bs);
403
-}
404
-
405
-typedef struct DiscardCo {
406
- BdrvChild *child;
407
- int64_t offset;
408
- int64_t bytes;
409
-} DiscardCo;
410
-
411
-static int coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
412
-{
413
- DiscardCo *rwco = opaque;
414
-
415
- return bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
416
-}
417
-
418
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
419
int64_t bytes)
420
{
421
@@ -XXX,XX +XXX,XX @@ out:
422
return ret;
423
}
424
425
-int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
426
-{
427
- DiscardCo rwco = {
428
- .child = child,
429
- .offset = offset,
430
- .bytes = bytes,
431
- };
432
-
433
- return bdrv_run_co(child->bs, bdrv_pdiscard_co_entry, &rwco);
434
-}
435
-
436
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
437
{
438
BlockDriver *drv = bs->drv;
439
@@ -XXX,XX +XXX,XX @@ out:
440
441
return ret;
442
}
443
-
444
-typedef struct TruncateCo {
445
- BdrvChild *child;
446
- int64_t offset;
447
- bool exact;
448
- PreallocMode prealloc;
449
- BdrvRequestFlags flags;
450
- Error **errp;
451
-} TruncateCo;
452
-
453
-static int coroutine_fn bdrv_truncate_co_entry(void *opaque)
454
-{
455
- TruncateCo *tco = opaque;
456
-
457
- return bdrv_co_truncate(tco->child, tco->offset, tco->exact,
458
- tco->prealloc, tco->flags, tco->errp);
459
-}
460
-
461
-int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
462
- PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
463
-{
464
- TruncateCo tco = {
465
- .child = child,
466
- .offset = offset,
467
- .exact = exact,
468
- .prealloc = prealloc,
469
- .flags = flags,
470
- .errp = errp,
471
- };
472
-
473
- return bdrv_run_co(child->bs, bdrv_truncate_co_entry, &tco);
474
-}
102
--
475
--
103
2.17.1
476
2.26.2
104
477
105
diff view generated by jsdifflib
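
One point worth keeping in mind for the conversion above: generated_co_wrapper
expands to nothing (it is an empty #define in include/block/block.h), so a
converted declaration keeps exactly the prototype its callers already use;
only the build-time generator reacts to the marker. Schematically, for one of
the functions converted here:

    /* before: declaration of the hand-written synchronous wrapper */
    int bdrv_flush(BlockDriverState *bs);

    /* after: identical prototype for the compiler and for callers, but the
     * wrapper body is now emitted by scripts/block-coroutine-wrapper.py */
    int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
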
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Jobs presently use both an Error object in the case of the create job,
3
Now that we are not maintaining boilerplate code for coroutine
4
and char strings in the case of generic errors elsewhere.
4
wrappers, there is no more sense in keeping the extra indirection layer
5
of bdrv_prwv(). Let's drop it and instead generate pure bdrv_preadv()
6
and bdrv_pwritev().
5
7
6
Unify the two paths as just j->err, and remove the extra argument from
8
Currently, bdrv_pwritev() and bdrv_preadv() are returning bytes on
7
job_completed. The integer error code for job_completed is kept for now,
9
success, auto generated functions will instead return zero, as their
8
to be removed shortly in a separate patch.
10
_co_ prototype. Still, it's simple to make the conversion safe: the
11
only external user of bdrv_pwritev() is test-bdrv-drain, and it is
12
comfortable enough with bdrv_co_pwritev() instead. So prototypes are
13
moved to local block/coroutines.h. Next, the only internal use is
14
bdrv_pread() and bdrv_pwrite(), which are modified to return bytes on
15
success.
9
16
10
Signed-off-by: John Snow <jsnow@redhat.com>
17
Of course, it would be great to convert bdrv_pread() and bdrv_pwrite()
11
Message-id: 20180830015734.19765-3-jsnow@redhat.com
18
to return 0 on success. But this requires an audit (and probably
12
[mreitz: Dropped a superfluous g_strdup()]
19
conversion) of all their users, so let's leave it for another day of
20
refactoring.
21
22
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
23
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
13
Reviewed-by: Eric Blake <eblake@redhat.com>
24
Reviewed-by: Eric Blake <eblake@redhat.com>
14
Signed-off-by: Max Reitz <mreitz@redhat.com>
25
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
26
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
27
Message-Id: <20200924185414.28642-7-vsementsov@virtuozzo.com>
15
---
28
---
16
include/qemu/job.h | 14 ++++++++------
29
block/coroutines.h | 10 ++++-----
17
block/backup.c | 2 +-
30
include/block/block.h | 2 --
18
block/commit.c | 2 +-
31
block/io.c | 49 ++++++++---------------------------------
19
block/create.c | 5 ++---
32
tests/test-bdrv-drain.c | 2 +-
20
block/mirror.c | 2 +-
33
4 files changed, 15 insertions(+), 48 deletions(-)
21
block/stream.c | 2 +-
22
job-qmp.c | 5 +++--
23
job.c | 18 ++++++------------
24
tests/test-bdrv-drain.c | 2 +-
25
tests/test-blockjob-txn.c | 2 +-
26
tests/test-blockjob.c | 2 +-
27
11 files changed, 26 insertions(+), 30 deletions(-)
28
34
29
diff --git a/include/qemu/job.h b/include/qemu/job.h
35
diff --git a/block/coroutines.h b/block/coroutines.h
30
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
31
--- a/include/qemu/job.h
37
--- a/block/coroutines.h
32
+++ b/include/qemu/job.h
38
+++ b/block/coroutines.h
33
@@ -XXX,XX +XXX,XX @@ typedef struct Job {
39
@@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_check(BlockDriverState *bs,
34
/** Estimated progress_current value at the completion of the job */
40
BdrvCheckResult *res, BdrvCheckMode fix);
35
int64_t progress_total;
41
int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp);
36
42
37
- /** Error string for a failed job (NULL if, and only if, job->ret == 0) */
43
-int coroutine_fn
38
- char *error;
44
-bdrv_co_prwv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov,
45
- bool is_write, BdrvRequestFlags flags);
46
int generated_co_wrapper
47
-bdrv_prwv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov,
48
- bool is_write, BdrvRequestFlags flags);
49
+bdrv_preadv(BdrvChild *child, int64_t offset, unsigned int bytes,
50
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
51
+int generated_co_wrapper
52
+bdrv_pwritev(BdrvChild *child, int64_t offset, unsigned int bytes,
53
+ QEMUIOVector *qiov, BdrvRequestFlags flags);
54
55
int coroutine_fn
56
bdrv_co_common_block_status_above(BlockDriverState *bs,
57
diff --git a/include/block/block.h b/include/block/block.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/include/block/block.h
60
+++ b/include/block/block.h
61
@@ -XXX,XX +XXX,XX @@ int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
62
int bytes, BdrvRequestFlags flags);
63
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
64
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
65
-int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
66
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
67
-int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
68
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
69
const void *buf, int count);
70
/*
71
diff --git a/block/io.c b/block/io.c
72
index XXXXXXX..XXXXXXX 100644
73
--- a/block/io.c
74
+++ b/block/io.c
75
@@ -XXX,XX +XXX,XX @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
76
return 0;
77
}
78
79
-int coroutine_fn bdrv_co_prwv(BdrvChild *child, int64_t offset,
80
- QEMUIOVector *qiov, bool is_write,
81
- BdrvRequestFlags flags)
82
-{
83
- if (is_write) {
84
- return bdrv_co_pwritev(child, offset, qiov->size, qiov, flags);
85
- } else {
86
- return bdrv_co_preadv(child, offset, qiov->size, qiov, flags);
87
- }
88
-}
39
-
89
-
40
/** ret code passed to job_completed. */
90
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
41
int ret;
91
int bytes, BdrvRequestFlags flags)
42
43
+ /**
44
+ * Error object for a failed job.
45
+ * If job->ret is nonzero and an error object was not set, it will be set
46
+ * to strerror(-job->ret) during job_completed.
47
+ */
48
+ Error *err;
49
+
50
/** The completion function that will be called when the job completes. */
51
BlockCompletionFunc *cb;
52
53
@@ -XXX,XX +XXX,XX @@ void job_transition_to_ready(Job *job);
54
/**
55
* @job: The job being completed.
56
* @ret: The status code.
57
- * @error: The error message for a failing job (only with @ret < 0). If @ret is
58
- * negative, but NULL is given for @error, strerror() is used.
59
*
60
* Marks @job as completed. If @ret is non-zero, the job transaction it is part
61
* of is aborted. If @ret is zero, the job moves into the WAITING state. If it
62
* is the last job to complete in its transaction, all jobs in the transaction
63
* move from WAITING to PENDING.
64
*/
65
-void job_completed(Job *job, int ret, Error *error);
66
+void job_completed(Job *job, int ret);
67
68
/** Asynchronously complete the specified @job. */
69
void job_complete(Job *job, Error **errp);
70
diff --git a/block/backup.c b/block/backup.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/block/backup.c
73
+++ b/block/backup.c
74
@@ -XXX,XX +XXX,XX @@ static void backup_complete(Job *job, void *opaque)
75
{
92
{
76
BackupCompleteData *data = opaque;
93
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
77
94
-
78
- job_completed(job, data->ret, NULL);
95
- return bdrv_prwv(child, offset, &qiov, true, BDRV_REQ_ZERO_WRITE | flags);
79
+ job_completed(job, data->ret);
96
+ return bdrv_pwritev(child, offset, bytes, NULL,
80
g_free(data);
97
+ BDRV_REQ_ZERO_WRITE | flags);
81
}
98
}
82
99
83
diff --git a/block/commit.c b/block/commit.c
100
/*
84
index XXXXXXX..XXXXXXX 100644
101
@@ -XXX,XX +XXX,XX @@ int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
85
--- a/block/commit.c
86
+++ b/block/commit.c
87
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
88
* bdrv_set_backing_hd() to fail. */
89
block_job_remove_all_bdrv(bjob);
90
91
- job_completed(job, ret, NULL);
92
+ job_completed(job, ret);
93
g_free(data);
94
95
/* If bdrv_drop_intermediate() didn't already do that, remove the commit
96
diff --git a/block/create.c b/block/create.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/block/create.c
99
+++ b/block/create.c
100
@@ -XXX,XX +XXX,XX @@ typedef struct BlockdevCreateJob {
101
BlockDriver *drv;
102
BlockdevCreateOptions *opts;
103
int ret;
104
- Error *err;
105
} BlockdevCreateJob;
106
107
static void blockdev_create_complete(Job *job, void *opaque)
108
{
109
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
110
111
- job_completed(job, s->ret, s->err);
112
+ job_completed(job, s->ret);
113
}
114
115
static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
116
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
117
BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
118
119
job_progress_set_remaining(&s->common, 1);
120
- s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
121
+ s->ret = s->drv->bdrv_co_create(s->opts, errp);
122
job_progress_update(&s->common, 1);
123
124
qapi_free_BlockdevCreateOptions(s->opts);
125
diff --git a/block/mirror.c b/block/mirror.c
126
index XXXXXXX..XXXXXXX 100644
127
--- a/block/mirror.c
128
+++ b/block/mirror.c
129
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
130
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
131
132
bs_opaque->job = NULL;
133
- job_completed(job, data->ret, NULL);
134
+ job_completed(job, data->ret);
135
136
g_free(data);
137
bdrv_drained_end(src);
138
diff --git a/block/stream.c b/block/stream.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/block/stream.c
141
+++ b/block/stream.c
142
@@ -XXX,XX +XXX,XX @@ out:
143
}
144
145
g_free(s->backing_file_str);
146
- job_completed(job, data->ret, NULL);
147
+ job_completed(job, data->ret);
148
g_free(data);
149
}
150
151
diff --git a/job-qmp.c b/job-qmp.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/job-qmp.c
154
+++ b/job-qmp.c
155
@@ -XXX,XX +XXX,XX @@ static JobInfo *job_query_single(Job *job, Error **errp)
156
.status = job->status,
157
.current_progress = job->progress_current,
158
.total_progress = job->progress_total,
159
- .has_error = !!job->error,
160
- .error = g_strdup(job->error),
161
+ .has_error = !!job->err,
162
+ .error = job->err ? \
163
+ g_strdup(error_get_pretty(job->err)) : NULL,
164
};
165
166
return info;
167
diff --git a/job.c b/job.c
168
index XXXXXXX..XXXXXXX 100644
169
--- a/job.c
170
+++ b/job.c
171
@@ -XXX,XX +XXX,XX @@ void job_unref(Job *job)
172
173
QLIST_REMOVE(job, job_list);
174
175
- g_free(job->error);
176
+ error_free(job->err);
177
g_free(job->id);
178
g_free(job);
179
}
180
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
181
182
assert(job && job->driver && job->driver->run);
183
job_pause_point(job);
184
- job->ret = job->driver->run(job, NULL);
185
+ job->ret = job->driver->run(job, &job->err);
186
}
187
188
189
@@ -XXX,XX +XXX,XX @@ static void job_update_rc(Job *job)
190
job->ret = -ECANCELED;
191
}
192
if (job->ret) {
193
- if (!job->error) {
194
- job->error = g_strdup(strerror(-job->ret));
195
+ if (!job->err) {
196
+ error_setg(&job->err, "%s", strerror(-job->ret));
197
}
198
job_state_transition(job, JOB_STATUS_ABORTING);
199
}
200
@@ -XXX,XX +XXX,XX @@ static void job_completed_txn_success(Job *job)
201
}
102
}
202
}
103
}
203
104
204
-void job_completed(Job *job, int ret, Error *error)
105
-/* return < 0 if error. See bdrv_pwrite() for the return codes */
205
+void job_completed(Job *job, int ret)
106
-int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
206
{
107
-{
207
assert(job && job->txn && !job_is_completed(job));
108
- int ret;
208
109
-
209
job->ret = ret;
110
- ret = bdrv_prwv(child, offset, qiov, false, 0);
210
- if (error) {
111
- if (ret < 0) {
211
- assert(job->ret < 0);
112
- return ret;
212
- job->error = g_strdup(error_get_pretty(error));
213
- error_free(error);
214
- }
113
- }
215
-
114
-
216
job_update_rc(job);
115
- return qiov->size;
217
trace_job_completed(job, ret, job->ret);
116
-}
218
if (job->ret) {
117
-
219
@@ -XXX,XX +XXX,XX @@ void job_cancel(Job *job, bool force)
118
/* See bdrv_pwrite() for the return codes */
119
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
120
{
121
+ int ret;
122
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
123
124
if (bytes < 0) {
125
return -EINVAL;
220
}
126
}
221
job_cancel_async(job, force);
127
222
if (!job_started(job)) {
128
- return bdrv_preadv(child, offset, &qiov);
223
- job_completed(job, -ECANCELED, NULL);
129
-}
224
+ job_completed(job, -ECANCELED);
130
+ ret = bdrv_preadv(child, offset, bytes, &qiov, 0);
225
} else if (job->deferred_to_main_loop) {
131
226
job_completed_txn_abort(job);
132
-int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
227
} else {
133
-{
134
- int ret;
135
-
136
- ret = bdrv_prwv(child, offset, qiov, true, 0);
137
- if (ret < 0) {
138
- return ret;
139
- }
140
-
141
- return qiov->size;
142
+ return ret < 0 ? ret : bytes;
143
}
144
145
/* Return no. of bytes on success or < 0 on error. Important errors are:
146
@@ -XXX,XX +XXX,XX @@ int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
147
*/
148
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
149
{
150
+ int ret;
151
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
152
153
if (bytes < 0) {
154
return -EINVAL;
155
}
156
157
- return bdrv_pwritev(child, offset, &qiov);
158
+ ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);
159
+
160
+ return ret < 0 ? ret : bytes;
161
}
162
163
/*
228
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
164
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
229
index XXXXXXX..XXXXXXX 100644
165
index XXXXXXX..XXXXXXX 100644
230
--- a/tests/test-bdrv-drain.c
166
--- a/tests/test-bdrv-drain.c
231
+++ b/tests/test-bdrv-drain.c
167
+++ b/tests/test-bdrv-drain.c
232
@@ -XXX,XX +XXX,XX @@ typedef struct TestBlockJob {
168
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs,
233
169
}
234
static void test_job_completed(Job *job, void *opaque)
170
s->io_co = NULL;
235
{
171
236
- job_completed(job, 0, NULL);
172
- ret = bdrv_preadv(bs->backing, offset, qiov);
237
+ job_completed(job, 0);
173
+ ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
238
}
174
s->has_read = true;
239
175
240
static int coroutine_fn test_job_run(Job *job, Error **errp)
176
/* Wake up drain_co if it runs */
241
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
242
index XXXXXXX..XXXXXXX 100644
243
--- a/tests/test-blockjob-txn.c
244
+++ b/tests/test-blockjob-txn.c
245
@@ -XXX,XX +XXX,XX @@ static void test_block_job_complete(Job *job, void *opaque)
246
rc = -ECANCELED;
247
}
248
249
- job_completed(job, rc, NULL);
250
+ job_completed(job, rc);
251
bdrv_unref(bs);
252
}
253
254
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
255
index XXXXXXX..XXXXXXX 100644
256
--- a/tests/test-blockjob.c
257
+++ b/tests/test-blockjob.c
258
@@ -XXX,XX +XXX,XX @@ static void cancel_job_completed(Job *job, void *opaque)
259
{
260
CancelJob *s = opaque;
261
s->completed = true;
262
- job_completed(job, 0, NULL);
263
+ job_completed(job, 0);
264
}
265
266
static void cancel_job_complete(Job *job, Error **errp)
267
--
177
--
268
2.17.1
178
2.26.2
269
179
270
diff view generated by jsdifflib
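
Because the generated bdrv_preadv()/bdrv_pwritev() return 0 on success, like
their _co_ counterparts, the byte-count contract is restored one level up in
bdrv_pread()/bdrv_pwrite(). For readability, the resulting bdrv_pread() as
reassembled from the hunks above:

    int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
    {
        int ret;
        QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

        if (bytes < 0) {
            return -EINVAL;
        }

        ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

        /* keep the historical contract: number of bytes on success */
        return ret < 0 ? ret : bytes;
    }
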
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
Presently we codify the entry point for a job as the "start" callback,
3
Like for read/write in a previous commit, drop extra indirection layer,
4
but a more apt name would be "run" to clarify the idea that when this
4
generate directly bdrv_readv_vmstate() and bdrv_writev_vmstate().
5
function returns we consider the job to have "finished," except for
6
any cleanup which occurs in separate callbacks later.
7
5
8
As part of this clarification, change the signature to include an error
6
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
9
object and a return code. The error ptr is not yet used, and the return
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
code, while captured, will be overwritten by actions in the job_completed
8
Reviewed-by: Eric Blake <eblake@redhat.com>
11
function.
9
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
10
Message-Id: <20200924185414.28642-8-vsementsov@virtuozzo.com>
11
---
12
block/coroutines.h | 10 +++----
13
include/block/block.h | 6 ++--
14
block/io.c | 70 ++++++++++++++++++++++---------------------
15
3 files changed, 44 insertions(+), 42 deletions(-)
12
16
13
Signed-off-by: John Snow <jsnow@redhat.com>
17
diff --git a/block/coroutines.h b/block/coroutines.h
14
Reviewed-by: Max Reitz <mreitz@redhat.com>
15
Message-id: 20180830015734.19765-2-jsnow@redhat.com
16
Reviewed-by: Jeff Cody <jcody@redhat.com>
17
Signed-off-by: Max Reitz <mreitz@redhat.com>
18
---
19
include/qemu/job.h | 2 +-
20
block/backup.c | 7 ++++---
21
block/commit.c | 7 ++++---
22
block/create.c | 8 +++++---
23
block/mirror.c | 10 ++++++----
24
block/stream.c | 7 ++++---
25
job.c | 6 +++---
26
tests/test-bdrv-drain.c | 7 ++++---
27
tests/test-blockjob-txn.c | 16 ++++++++--------
28
tests/test-blockjob.c | 7 ++++---
29
10 files changed, 43 insertions(+), 34 deletions(-)
30
31
diff --git a/include/qemu/job.h b/include/qemu/job.h
32
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
33
--- a/include/qemu/job.h
19
--- a/block/coroutines.h
34
+++ b/include/qemu/job.h
20
+++ b/block/coroutines.h
35
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
21
@@ -XXX,XX +XXX,XX @@ bdrv_common_block_status_above(BlockDriverState *bs,
36
JobType job_type;
22
int64_t *map,
37
23
BlockDriverState **file);
38
/** Mandatory: Entrypoint for the Coroutine. */
24
39
- CoroutineEntry *start;
25
-int coroutine_fn
40
+ int coroutine_fn (*run)(Job *job, Error **errp);
26
-bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
41
27
- bool is_read);
42
/**
28
-int generated_co_wrapper
43
* If the callback is not NULL, it will be invoked when the job transitions
29
-bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
44
diff --git a/block/backup.c b/block/backup.c
30
- bool is_read);
31
+int coroutine_fn bdrv_co_readv_vmstate(BlockDriverState *bs,
32
+ QEMUIOVector *qiov, int64_t pos);
33
+int coroutine_fn bdrv_co_writev_vmstate(BlockDriverState *bs,
34
+ QEMUIOVector *qiov, int64_t pos);
35
36
#endif /* BLOCK_COROUTINES_INT_H */
37
diff --git a/include/block/block.h b/include/block/block.h
45
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
46
--- a/block/backup.c
39
--- a/include/block/block.h
47
+++ b/block/backup.c
40
+++ b/include/block/block.h
48
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
41
@@ -XXX,XX +XXX,XX @@ int path_has_protocol(const char *path);
49
bdrv_dirty_iter_free(dbi);
42
int path_is_absolute(const char *path);
43
char *path_combine(const char *base_path, const char *filename);
44
45
-int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
46
-int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
47
+int generated_co_wrapper
48
+bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
49
+int generated_co_wrapper
50
+bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
51
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
52
int64_t pos, int size);
53
54
diff --git a/block/io.c b/block/io.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/block/io.c
57
+++ b/block/io.c
58
@@ -XXX,XX +XXX,XX @@ int bdrv_is_allocated_above(BlockDriverState *top,
50
}
59
}
51
60
52
-static void coroutine_fn backup_run(void *opaque)
61
int coroutine_fn
53
+static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
62
-bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
63
- bool is_read)
64
+bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
54
{
65
{
55
- BackupBlockJob *job = opaque;
66
BlockDriver *drv = bs->drv;
56
+ BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
67
BlockDriverState *child_bs = bdrv_primary_bs(bs);
57
BackupCompleteData *data;
68
int ret = -ENOTSUP;
58
BlockDriverState *bs = blk_bs(job->common.blk);
69
59
int64_t offset, nb_clusters;
70
+ if (!drv) {
60
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn backup_run(void *opaque)
71
+ return -ENOMEDIUM;
61
data = g_malloc(sizeof(*data));
72
+ }
62
data->ret = ret;
73
+
63
job_defer_to_main_loop(&job->common.job, backup_complete, data);
74
bdrv_inc_in_flight(bs);
75
76
+ if (drv->bdrv_load_vmstate) {
77
+ ret = drv->bdrv_load_vmstate(bs, qiov, pos);
78
+ } else if (child_bs) {
79
+ ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
80
+ }
81
+
82
+ bdrv_dec_in_flight(bs);
83
+
64
+ return ret;
84
+ return ret;
65
}
85
+}
66
67
static const BlockJobDriver backup_job_driver = {
68
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver backup_job_driver = {
69
.free = block_job_free,
70
.user_resume = block_job_user_resume,
71
.drain = block_job_drain,
72
- .start = backup_run,
73
+ .run = backup_run,
74
.commit = backup_commit,
75
.abort = backup_abort,
76
.clean = backup_clean,
77
diff --git a/block/commit.c b/block/commit.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/block/commit.c
80
+++ b/block/commit.c
81
@@ -XXX,XX +XXX,XX @@ static void commit_complete(Job *job, void *opaque)
82
bdrv_unref(top);
83
}
84
85
-static void coroutine_fn commit_run(void *opaque)
86
+static int coroutine_fn commit_run(Job *job, Error **errp)
87
{
88
- CommitBlockJob *s = opaque;
89
+ CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
90
CommitCompleteData *data;
91
int64_t offset;
92
uint64_t delay_ns = 0;
93
@@ -XXX,XX +XXX,XX @@ out:
94
data = g_malloc(sizeof(*data));
95
data->ret = ret;
96
job_defer_to_main_loop(&s->common.job, commit_complete, data);
97
+ return ret;
98
}
99
100
static const BlockJobDriver commit_job_driver = {
101
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_job_driver = {
102
.free = block_job_free,
103
.user_resume = block_job_user_resume,
104
.drain = block_job_drain,
105
- .start = commit_run,
106
+ .run = commit_run,
107
},
108
};
109
110
diff --git a/block/create.c b/block/create.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/block/create.c
113
+++ b/block/create.c
114
@@ -XXX,XX +XXX,XX @@ static void blockdev_create_complete(Job *job, void *opaque)
115
job_completed(job, s->ret, s->err);
116
}
117
118
-static void coroutine_fn blockdev_create_run(void *opaque)
119
+static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
120
{
121
- BlockdevCreateJob *s = opaque;
122
+ BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
123
124
job_progress_set_remaining(&s->common, 1);
125
s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
126
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn blockdev_create_run(void *opaque)
127
128
qapi_free_BlockdevCreateOptions(s->opts);
129
job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
130
+
86
+
131
+ return s->ret;
87
+int coroutine_fn
132
}
88
+bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
133
89
+{
134
static const JobDriver blockdev_create_job_driver = {
90
+ BlockDriver *drv = bs->drv;
135
.instance_size = sizeof(BlockdevCreateJob),
91
+ BlockDriverState *child_bs = bdrv_primary_bs(bs);
136
.job_type = JOB_TYPE_CREATE,
92
+ int ret = -ENOTSUP;
137
- .start = blockdev_create_run,
93
+
138
+ .run = blockdev_create_run,
94
if (!drv) {
139
};
95
- ret = -ENOMEDIUM;
140
96
- } else if (drv->bdrv_load_vmstate) {
141
void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
97
- if (is_read) {
142
diff --git a/block/mirror.c b/block/mirror.c
98
- ret = drv->bdrv_load_vmstate(bs, qiov, pos);
143
index XXXXXXX..XXXXXXX 100644
99
- } else {
144
--- a/block/mirror.c
100
- ret = drv->bdrv_save_vmstate(bs, qiov, pos);
145
+++ b/block/mirror.c
101
- }
146
@@ -XXX,XX +XXX,XX @@ static int mirror_flush(MirrorBlockJob *s)
102
+ return -ENOMEDIUM;
103
+ }
104
+
105
+ bdrv_inc_in_flight(bs);
106
+
107
+ if (drv->bdrv_save_vmstate) {
108
+ ret = drv->bdrv_save_vmstate(bs, qiov, pos);
109
} else if (child_bs) {
110
- ret = bdrv_co_rw_vmstate(child_bs, qiov, pos, is_read);
111
+ ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
112
}
113
114
bdrv_dec_in_flight(bs);
115
+
147
return ret;
116
return ret;
148
}
117
}
149
118
150
-static void coroutine_fn mirror_run(void *opaque)
119
@@ -XXX,XX +XXX,XX @@ int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
151
+static int coroutine_fn mirror_run(Job *job, Error **errp)
120
int64_t pos, int size)
152
{
121
{
153
- MirrorBlockJob *s = opaque;
122
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
154
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
123
- int ret;
155
MirrorExitData *data;
124
+ int ret = bdrv_writev_vmstate(bs, &qiov, pos);
156
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
125
157
BlockDriverState *target_bs = blk_bs(s->target);
126
- ret = bdrv_writev_vmstate(bs, &qiov, pos);
158
@@ -XXX,XX +XXX,XX @@ immediate_exit:
127
- if (ret < 0) {
159
if (need_drain) {
128
- return ret;
160
bdrv_drained_begin(bs);
129
- }
161
}
130
-
162
+
131
- return size;
163
job_defer_to_main_loop(&s->common.job, mirror_exit, data);
132
-}
164
+ return ret;
133
-
134
-int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
135
-{
136
- return bdrv_rw_vmstate(bs, qiov, pos, false);
137
+ return ret < 0 ? ret : size;
165
}
138
}
166
139
167
static void mirror_complete(Job *job, Error **errp)
140
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
168
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
141
int64_t pos, int size)
169
.free = block_job_free,
142
{
170
.user_resume = block_job_user_resume,
143
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
171
.drain = block_job_drain,
144
- int ret;
172
- .start = mirror_run,
145
+ int ret = bdrv_readv_vmstate(bs, &qiov, pos);
173
+ .run = mirror_run,
146
174
.pause = mirror_pause,
147
- ret = bdrv_readv_vmstate(bs, &qiov, pos);
175
.complete = mirror_complete,
148
- if (ret < 0) {
176
},
149
- return ret;
177
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
150
- }
178
.free = block_job_free,
151
-
179
.user_resume = block_job_user_resume,
152
- return size;
180
.drain = block_job_drain,
153
-}
181
- .start = mirror_run,
154
-
182
+ .run = mirror_run,
155
-int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
183
.pause = mirror_pause,
156
-{
184
.complete = mirror_complete,
157
- return bdrv_rw_vmstate(bs, qiov, pos, true);
185
},
158
+ return ret < 0 ? ret : size;
186
diff --git a/block/stream.c b/block/stream.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/block/stream.c
189
+++ b/block/stream.c
190
@@ -XXX,XX +XXX,XX @@ out:
191
g_free(data);
192
}
159
}
193
160
194
-static void coroutine_fn stream_run(void *opaque)
161
/**************************************************************/
195
+static int coroutine_fn stream_run(Job *job, Error **errp)
196
{
197
- StreamBlockJob *s = opaque;
198
+ StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
199
StreamCompleteData *data;
200
BlockBackend *blk = s->common.blk;
201
BlockDriverState *bs = blk_bs(blk);
202
@@ -XXX,XX +XXX,XX @@ out:
203
data = g_malloc(sizeof(*data));
204
data->ret = ret;
205
job_defer_to_main_loop(&s->common.job, stream_complete, data);
206
+ return ret;
207
}
208
209
static const BlockJobDriver stream_job_driver = {
210
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver stream_job_driver = {
211
.instance_size = sizeof(StreamBlockJob),
212
.job_type = JOB_TYPE_STREAM,
213
.free = block_job_free,
214
- .start = stream_run,
215
+ .run = stream_run,
216
.user_resume = block_job_user_resume,
217
.drain = block_job_drain,
218
},
219
diff --git a/job.c b/job.c
220
index XXXXXXX..XXXXXXX 100644
221
--- a/job.c
222
+++ b/job.c
223
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
224
{
225
Job *job = opaque;
226
227
- assert(job && job->driver && job->driver->start);
228
+ assert(job && job->driver && job->driver->run);
229
job_pause_point(job);
230
- job->driver->start(job);
231
+ job->ret = job->driver->run(job, NULL);
232
}
233
234
235
void job_start(Job *job)
236
{
237
assert(job && !job_started(job) && job->paused &&
238
- job->driver && job->driver->start);
239
+ job->driver && job->driver->run);
240
job->co = qemu_coroutine_create(job_co_entry, job);
241
job->pause_count--;
242
job->busy = true;
243
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
244
index XXXXXXX..XXXXXXX 100644
245
--- a/tests/test-bdrv-drain.c
246
+++ b/tests/test-bdrv-drain.c
247
@@ -XXX,XX +XXX,XX @@ static void test_job_completed(Job *job, void *opaque)
248
job_completed(job, 0, NULL);
249
}
250
251
-static void coroutine_fn test_job_start(void *opaque)
252
+static int coroutine_fn test_job_run(Job *job, Error **errp)
253
{
254
- TestBlockJob *s = opaque;
255
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
256
257
job_transition_to_ready(&s->common.job);
258
while (!s->should_complete) {
259
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_job_start(void *opaque)
260
}
261
262
job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
263
+ return 0;
264
}
265
266
static void test_job_complete(Job *job, Error **errp)
267
@@ -XXX,XX +XXX,XX @@ BlockJobDriver test_job_driver = {
268
.free = block_job_free,
269
.user_resume = block_job_user_resume,
270
.drain = block_job_drain,
271
- .start = test_job_start,
272
+ .run = test_job_run,
273
.complete = test_job_complete,
274
},
275
};
276
diff --git a/tests/test-blockjob-txn.c b/tests/test-blockjob-txn.c
277
index XXXXXXX..XXXXXXX 100644
278
--- a/tests/test-blockjob-txn.c
279
+++ b/tests/test-blockjob-txn.c
280
@@ -XXX,XX +XXX,XX @@ static void test_block_job_complete(Job *job, void *opaque)
281
bdrv_unref(bs);
282
}
283
284
-static void coroutine_fn test_block_job_run(void *opaque)
285
+static int coroutine_fn test_block_job_run(Job *job, Error **errp)
286
{
287
- TestBlockJob *s = opaque;
288
- BlockJob *job = &s->common;
289
+ TestBlockJob *s = container_of(job, TestBlockJob, common.job);
290
291
while (s->iterations--) {
292
if (s->use_timer) {
293
- job_sleep_ns(&job->job, 0);
294
+ job_sleep_ns(job, 0);
295
} else {
296
- job_yield(&job->job);
297
+ job_yield(job);
298
}
299
300
- if (job_is_cancelled(&job->job)) {
301
+ if (job_is_cancelled(job)) {
302
break;
303
}
304
}
305
306
- job_defer_to_main_loop(&job->job, test_block_job_complete,
307
+ job_defer_to_main_loop(job, test_block_job_complete,
308
(void *)(intptr_t)s->rc);
309
+ return s->rc;
310
}
311
312
typedef struct {
313
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_block_job_driver = {
314
.free = block_job_free,
315
.user_resume = block_job_user_resume,
316
.drain = block_job_drain,
317
- .start = test_block_job_run,
318
+ .run = test_block_job_run,
319
},
320
};
321
322
diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c
323
index XXXXXXX..XXXXXXX 100644
324
--- a/tests/test-blockjob.c
325
+++ b/tests/test-blockjob.c
326
@@ -XXX,XX +XXX,XX @@ static void cancel_job_complete(Job *job, Error **errp)
327
s->should_complete = true;
328
}
329
330
-static void coroutine_fn cancel_job_start(void *opaque)
331
+static int coroutine_fn cancel_job_run(Job *job, Error **errp)
332
{
333
- CancelJob *s = opaque;
334
+ CancelJob *s = container_of(job, CancelJob, common.job);
335
336
while (!s->should_complete) {
337
if (job_is_cancelled(&s->common.job)) {
338
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn cancel_job_start(void *opaque)
339
340
defer:
341
job_defer_to_main_loop(&s->common.job, cancel_job_completed, s);
342
+ return 0;
343
}
344
345
static const BlockJobDriver test_cancel_driver = {
346
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver test_cancel_driver = {
347
.free = block_job_free,
348
.user_resume = block_job_user_resume,
349
.drain = block_job_drain,
350
- .start = cancel_job_start,
351
+ .run = cancel_job_run,
352
.complete = cancel_job_complete,
353
},
354
};
355
--
162
--
356
2.17.1
163
2.26.2
357
164
358
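
The hunks above converge on the new JobDriver interface: the coroutine entry point becomes a .run() callback that returns its status, instead of a void .start() that must report completion itself. A minimal sketch of a driver written against the new interface, using hypothetical names (ExampleJob, example_run) that are not part of this series:

#include "qemu/osdep.h"
#include "qemu/job.h"

typedef struct ExampleJob {
    Job common;                 /* generic job state */
    bool should_complete;
} ExampleJob;

static int coroutine_fn example_run(Job *job, Error **errp)
{
    ExampleJob *s = container_of(job, ExampleJob, common);

    while (!s->should_complete) {
        if (job_is_cancelled(job)) {
            break;
        }
        job_sleep_ns(job, 100000);  /* yield so the job can pause or cancel */
    }

    /* The core stores this return value in job->ret (see the job.c hunk above). */
    return 0;
}

static const JobDriver example_job_driver = {
    .instance_size = sizeof(ExampleJob),
    .run           = example_run,
};
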
1
From: John Snow <jsnow@redhat.com>
1
From: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
2
2
3
All jobs do the same thing when they leave their running loop:
3
This is the only non-ASCII character in the file and it isn't really
4
- Store the return code in a structure
4
needed here. Let's use the normal "'" symbol, for consistency with the other
5
- Wait to receive this structure in the main thread
5
11 occurrences of "'" in the file.
6
- Signal job completion via job_completed
7
6
8
Few jobs do anything beyond exactly this. Consolidate this exit
7
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
9
logic for a net reduction in SLOC.
8
Reviewed-by: Eric Blake <eblake@redhat.com>
9
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
10
---
11
include/block/block.h | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
10
13
11
More seriously, when we utilize job_defer_to_main_loop_bh to call
14
diff --git a/include/block/block.h b/include/block/block.h
12
a function that calls job_completed, job_finalize_single will run
15
index XXXXXXX..XXXXXXX 100644
13
in a context where it has recursively taken the aio_context lock,
16
--- a/include/block/block.h
14
which can cause hangs if it puts down a reference that causes a flush.
17
+++ b/include/block/block.h
18
@@ -XXX,XX +XXX,XX @@ enum BdrvChildRoleBits {
19
BDRV_CHILD_FILTERED = (1 << 2),
20
21
/*
22
- * Child from which to read all data that isn’t allocated in the
23
+ * Child from which to read all data that isn't allocated in the
24
* parent (i.e., the backing child); such data is copied to the
25
* parent through COW (and optionally COR).
26
* This field is mutually exclusive with DATA, METADATA, and
27
--
28
2.26.2
15
29
16
You can observe this in practice by looking at mirror_exit's careful
17
placement of job_completed and bdrv_unref calls.
18
19
If we centralize job exiting, we can signal job completion from outside
20
of the aio_context, which should allow for job cleanup code to run with
21
only one lock, making cleanup callbacks less tricky to write.
22
23
Signed-off-by: John Snow <jsnow@redhat.com>
24
Reviewed-by: Max Reitz <mreitz@redhat.com>
25
Message-id: 20180830015734.19765-4-jsnow@redhat.com
26
Reviewed-by: Jeff Cody <jcody@redhat.com>
27
Signed-off-by: Max Reitz <mreitz@redhat.com>
28
---
29
include/qemu/job.h | 11 +++++++++++
30
job.c | 18 ++++++++++++++++++
31
2 files changed, 29 insertions(+)
32
33
diff --git a/include/qemu/job.h b/include/qemu/job.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/include/qemu/job.h
36
+++ b/include/qemu/job.h
37
@@ -XXX,XX +XXX,XX @@ struct JobDriver {
38
*/
39
void (*drain)(Job *job);
40
41
+ /**
42
+ * If the callback is not NULL, exit will be invoked from the main thread
43
+ * when the job's coroutine has finished, but before transactional
44
+ * convergence; before @prepare or @abort.
45
+ *
46
+ * FIXME TODO: This callback is only temporary to transition remaining jobs
47
+ * to prepare/commit/abort/clean callbacks and will be removed before 3.1
48
+ * is released.
49
+ */
50
+ void (*exit)(Job *job);
51
+
52
/**
53
* If the callback is not NULL, prepare will be invoked when all the jobs
54
* belonging to the same transaction complete; or upon this job's completion
55
diff --git a/job.c b/job.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/job.c
58
+++ b/job.c
59
@@ -XXX,XX +XXX,XX @@ void job_drain(Job *job)
60
}
61
}
62
63
+static void job_exit(void *opaque)
64
+{
65
+ Job *job = (Job *)opaque;
66
+ AioContext *aio_context = job->aio_context;
67
+
68
+ if (job->driver->exit) {
69
+ aio_context_acquire(aio_context);
70
+ job->driver->exit(job);
71
+ aio_context_release(aio_context);
72
+ }
73
+ job_completed(job, job->ret);
74
+}
75
76
/**
77
* All jobs must allow a pause point before entering their job proper. This
78
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn job_co_entry(void *opaque)
79
assert(job && job->driver && job->driver->run);
80
job_pause_point(job);
81
job->ret = job->driver->run(job, &job->err);
82
+ if (!job->deferred_to_main_loop) {
83
+ job->deferred_to_main_loop = true;
84
+ aio_bh_schedule_oneshot(qemu_get_aio_context(),
85
+ job_exit,
86
+ job);
87
+ }
88
}
89
90
91
--
92
2.17.1
93
94
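
To make the new flow concrete: with the .exit shim above, a driver returns its status from .run() and performs its main-loop cleanup in .exit(), which job_exit() invokes with the job's AioContext acquired, before calling job_completed(). A hedged sketch, using hypothetical names (DemoJob, demo_run, demo_exit) that do not appear in this series:

#include "qemu/osdep.h"
#include "qemu/job.h"

typedef struct DemoJob {
    Job common;
    void *scratch;              /* resource to release on the main thread */
} DemoJob;

static int coroutine_fn demo_run(Job *job, Error **errp)
{
    /* ... do the actual work here, returning 0 or a negative errno ... */
    return 0;
}

static void demo_exit(Job *job)
{
    DemoJob *s = container_of(job, DemoJob, common);

    /* Runs in the main loop, AioContext held, after .run() has returned. */
    if (job->ret < 0) {
        /* roll back whatever .run() left half-done */
    }
    g_free(s->scratch);
}

static const JobDriver demo_job_driver = {
    .instance_size = sizeof(DemoJob),
    .run           = demo_run,
    .exit          = demo_exit,
};
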
1
From: Marc-André Lureau <marcandre.lureau@redhat.com>
1
From: Stefano Garzarella <sgarzare@redhat.com>
2
2
3
Spotted by ASAN:
3
When we added the io_uring AIO engine, we forgot to update qemu-options.hx,
4
so the qemu(1) man page and the qemu help output were outdated.
4
5
5
=================================================================
6
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
6
==5378==ERROR: LeakSanitizer: detected memory leaks
7
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
8
Reviewed-by: Julia Suvorova <jusual@redhat.com>
9
Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
10
Message-Id: <20200924151511.131471-1-sgarzare@redhat.com>
11
---
12
qemu-options.hx | 10 ++++++----
13
1 file changed, 6 insertions(+), 4 deletions(-)
7
14
8
Direct leak of 65536 byte(s) in 1 object(s) allocated from:
15
diff --git a/qemu-options.hx b/qemu-options.hx
9
#0 0x7f788f83bc48 in malloc (/lib64/libasan.so.5+0xeec48)
16
index XXXXXXX..XXXXXXX 100644
10
#1 0x7f788c9923c5 in g_malloc (/lib64/libglib-2.0.so.0+0x523c5)
17
--- a/qemu-options.hx
11
#2 0x5622a1fe37bc in coroutine_trampoline /home/elmarco/src/qq/util/coroutine-ucontext.c:116
18
+++ b/qemu-options.hx
12
#3 0x7f788a15d75f in __correctly_grouped_prefixwc (/lib64/libc.so.6+0x4c75f)
19
@@ -XXX,XX +XXX,XX @@ SRST
20
The path to the image file in the local filesystem
21
22
``aio``
23
- Specifies the AIO backend (threads/native, default: threads)
24
+ Specifies the AIO backend (threads/native/io_uring,
25
+ default: threads)
26
27
``locking``
28
Specifies whether the image file is protected with Linux OFD
29
@@ -XXX,XX +XXX,XX @@ DEF("drive", HAS_ARG, QEMU_OPTION_drive,
30
"-drive [file=file][,if=type][,bus=n][,unit=m][,media=d][,index=i]\n"
31
" [,cache=writethrough|writeback|none|directsync|unsafe][,format=f]\n"
32
" [,snapshot=on|off][,rerror=ignore|stop|report]\n"
33
- " [,werror=ignore|stop|report|enospc][,id=name][,aio=threads|native]\n"
34
+ " [,werror=ignore|stop|report|enospc][,id=name]\n"
35
+ " [,aio=threads|native|io_uring]\n"
36
" [,readonly=on|off][,copy-on-read=on|off]\n"
37
" [,discard=ignore|unmap][,detect-zeroes=on|off|unmap]\n"
38
" [[,bps=b]|[[,bps_rd=r][,bps_wr=w]]]\n"
39
@@ -XXX,XX +XXX,XX @@ SRST
40
The default mode is ``cache=writeback``.
41
42
``aio=aio``
43
- aio is "threads", or "native" and selects between pthread based
44
- disk I/O and native Linux AIO.
45
+ aio is "threads", "native", or "io_uring" and selects between pthread
46
+ based disk I/O, native Linux AIO, or Linux io_uring API.
47
48
``format=format``
49
Specify which disk format will be used rather than detecting the
50
--
51
2.26.2
13
52
14
(Broken in commit 4c8158e359d.)
15
16
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
17
Message-id: 20180809114417.28718-3-marcandre.lureau@redhat.com
18
Signed-off-by: Max Reitz <mreitz@redhat.com>
19
---
20
tests/test-bdrv-drain.c | 1 +
21
1 file changed, 1 insertion(+)
22
23
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tests/test-bdrv-drain.c
26
+++ b/tests/test-bdrv-drain.c
27
@@ -XXX,XX +XXX,XX @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
28
}
29
30
dbdd->done = true;
31
+ g_free(buffer);
32
}
33
34
/**
35
--
36
2.17.1
37
38
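
For completeness, an illustrative invocation using the newly documented value (disk.img is a placeholder image path, and the QEMU binary must have been built with io_uring support):

    qemu-system-x86_64 -drive file=disk.img,format=raw,if=virtio,aio=io_uring
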
1
From: John Snow <jsnow@redhat.com>
1
From: Eric Auger <eric.auger@redhat.com>
2
2
3
Rename opaque_job to job to be consistent with other job implementations.
3
The IOVA allocator currently ignores host reserved regions.
4
Rename 'job', the BackupBlockJob object, to 's' to also be consistent.
4
As a result, some chosen IOVAs may collide with some of them,
5
resulting in VFIO MAP_DMA errors later on. This happens on ARM
6
where the MSI reserved window is quickly encountered:
7
[0x8000000, 0x8100000]. Since kernel 5.4, VFIO returns the usable
8
IOVA regions. So let's enumerate them, with the aim of avoiding
9
them later on.
5
10
6
Suggested-by: Eric Blake <eblake@redhat.com>
11
Signed-off-by: Eric Auger <eric.auger@redhat.com>
7
Signed-off-by: John Snow <jsnow@redhat.com>
12
Message-id: 20200929085550.30926-2-eric.auger@redhat.com
8
Reviewed-by: Max Reitz <mreitz@redhat.com>
13
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
Message-id: 20180830015734.19765-8-jsnow@redhat.com
10
Signed-off-by: Max Reitz <mreitz@redhat.com>
11
---
14
---
12
block/backup.c | 62 +++++++++++++++++++++++++-------------------------
15
util/vfio-helpers.c | 72 +++++++++++++++++++++++++++++++++++++++++++--
13
1 file changed, 31 insertions(+), 31 deletions(-)
16
1 file changed, 70 insertions(+), 2 deletions(-)
14
17
15
diff --git a/block/backup.c b/block/backup.c
18
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
16
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
17
--- a/block/backup.c
20
--- a/util/vfio-helpers.c
18
+++ b/block/backup.c
21
+++ b/util/vfio-helpers.c
19
@@ -XXX,XX +XXX,XX @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
22
@@ -XXX,XX +XXX,XX @@ typedef struct {
20
bdrv_dirty_iter_free(dbi);
23
uint64_t iova;
24
} IOVAMapping;
25
26
+struct IOVARange {
27
+ uint64_t start;
28
+ uint64_t end;
29
+};
30
+
31
struct QEMUVFIOState {
32
QemuMutex lock;
33
34
@@ -XXX,XX +XXX,XX @@ struct QEMUVFIOState {
35
int device;
36
RAMBlockNotifier ram_notifier;
37
struct vfio_region_info config_region_info, bar_region_info[6];
38
+ struct IOVARange *usable_iova_ranges;
39
+ uint8_t nb_iova_ranges;
40
41
/* These fields are protected by @lock */
42
/* VFIO's IO virtual address space is managed by splitting into a few
43
@@ -XXX,XX +XXX,XX @@ static int qemu_vfio_pci_write_config(QEMUVFIOState *s, void *buf, int size, int
44
return ret == size ? 0 : -errno;
21
}
45
}
22
46
23
-static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
47
+static void collect_usable_iova_ranges(QEMUVFIOState *s, void *buf)
24
+static int coroutine_fn backup_run(Job *job, Error **errp)
48
+{
49
+ struct vfio_iommu_type1_info *info = (struct vfio_iommu_type1_info *)buf;
50
+ struct vfio_info_cap_header *cap = (void *)buf + info->cap_offset;
51
+ struct vfio_iommu_type1_info_cap_iova_range *cap_iova_range;
52
+ int i;
53
+
54
+ while (cap->id != VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE) {
55
+ if (!cap->next) {
56
+ return;
57
+ }
58
+ cap = (struct vfio_info_cap_header *)(buf + cap->next);
59
+ }
60
+
61
+ cap_iova_range = (struct vfio_iommu_type1_info_cap_iova_range *)cap;
62
+
63
+ s->nb_iova_ranges = cap_iova_range->nr_iovas;
64
+ if (s->nb_iova_ranges > 1) {
65
+ s->usable_iova_ranges =
66
+ g_realloc(s->usable_iova_ranges,
67
+ s->nb_iova_ranges * sizeof(struct IOVARange));
68
+ }
69
+
70
+ for (i = 0; i < s->nb_iova_ranges; i++) {
71
+ s->usable_iova_ranges[i].start = cap_iova_range->iova_ranges[i].start;
72
+ s->usable_iova_ranges[i].end = cap_iova_range->iova_ranges[i].end;
73
+ }
74
+}
75
+
76
static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
77
Error **errp)
25
{
78
{
26
- BackupBlockJob *job = container_of(opaque_job, BackupBlockJob, common.job);
79
@@ -XXX,XX +XXX,XX @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
27
- BlockDriverState *bs = blk_bs(job->common.blk);
80
int i;
28
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
81
uint16_t pci_cmd;
29
+ BlockDriverState *bs = blk_bs(s->common.blk);
82
struct vfio_group_status group_status = { .argsz = sizeof(group_status) };
30
int64_t offset, nb_clusters;
83
- struct vfio_iommu_type1_info iommu_info = { .argsz = sizeof(iommu_info) };
31
int ret = 0;
84
+ struct vfio_iommu_type1_info *iommu_info = NULL;
32
85
+ size_t iommu_info_size = sizeof(*iommu_info);
33
- QLIST_INIT(&job->inflight_reqs);
86
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
34
- qemu_co_rwlock_init(&job->flush_rwlock);
87
char *group_file = NULL;
35
+ QLIST_INIT(&s->inflight_reqs);
88
36
+ qemu_co_rwlock_init(&s->flush_rwlock);
89
+ s->usable_iova_ranges = NULL;
37
90
+
38
- nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
91
/* Create a new container */
39
- job_progress_set_remaining(&job->common.job, job->len);
92
s->container = open("/dev/vfio/vfio", O_RDWR);
40
+ nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
93
41
+ job_progress_set_remaining(job, s->len);
94
@@ -XXX,XX +XXX,XX @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
42
95
goto fail;
43
- job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
44
- if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
45
- backup_incremental_init_copy_bitmap(job);
46
+ s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
47
+ if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
48
+ backup_incremental_init_copy_bitmap(s);
49
} else {
50
- hbitmap_set(job->copy_bitmap, 0, nb_clusters);
51
+ hbitmap_set(s->copy_bitmap, 0, nb_clusters);
52
}
96
}
53
97
54
98
+ iommu_info = g_malloc0(iommu_info_size);
55
- job->before_write.notify = backup_before_write_notify;
99
+ iommu_info->argsz = iommu_info_size;
56
- bdrv_add_before_write_notifier(bs, &job->before_write);
100
+
57
+ s->before_write.notify = backup_before_write_notify;
101
/* Get additional IOMMU info */
58
+ bdrv_add_before_write_notifier(bs, &s->before_write);
102
- if (ioctl(s->container, VFIO_IOMMU_GET_INFO, &iommu_info)) {
59
103
+ if (ioctl(s->container, VFIO_IOMMU_GET_INFO, iommu_info)) {
60
- if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
104
error_setg_errno(errp, errno, "Failed to get IOMMU info");
61
+ if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
105
ret = -errno;
62
/* All bits are set in copy_bitmap to allow any cluster to be copied.
106
goto fail;
63
* This does not actually require them to be copied. */
64
- while (!job_is_cancelled(&job->common.job)) {
65
+ while (!job_is_cancelled(job)) {
66
/* Yield until the job is cancelled. We just let our before_write
67
* notify callback service CoW requests. */
68
- job_yield(&job->common.job);
69
+ job_yield(job);
70
}
71
- } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
72
- ret = backup_run_incremental(job);
73
+ } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
74
+ ret = backup_run_incremental(s);
75
} else {
76
/* Both FULL and TOP SYNC_MODE's require copying.. */
77
- for (offset = 0; offset < job->len;
78
- offset += job->cluster_size) {
79
+ for (offset = 0; offset < s->len;
80
+ offset += s->cluster_size) {
81
bool error_is_read;
82
int alloced = 0;
83
84
- if (yield_and_check(job)) {
85
+ if (yield_and_check(s)) {
86
break;
87
}
88
89
- if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
90
+ if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
91
int i;
92
int64_t n;
93
94
/* Check to see if these blocks are already in the
95
* backing file. */
96
97
- for (i = 0; i < job->cluster_size;) {
98
+ for (i = 0; i < s->cluster_size;) {
99
/* bdrv_is_allocated() only returns true/false based
100
* on the first set of sectors it comes across that
101
* are all in the same state.
102
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
103
* needed but at some point that is always the case. */
104
alloced =
105
bdrv_is_allocated(bs, offset + i,
106
- job->cluster_size - i, &n);
107
+ s->cluster_size - i, &n);
108
i += n;
109
110
if (alloced || n == 0) {
111
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn backup_run(Job *opaque_job, Error **errp)
112
if (alloced < 0) {
113
ret = alloced;
114
} else {
115
- ret = backup_do_cow(job, offset, job->cluster_size,
116
+ ret = backup_do_cow(s, offset, s->cluster_size,
117
&error_is_read, false);
118
}
119
if (ret < 0) {
120
/* Depending on error action, fail now or retry cluster */
121
BlockErrorAction action =
122
- backup_error_action(job, error_is_read, -ret);
123
+ backup_error_action(s, error_is_read, -ret);
124
if (action == BLOCK_ERROR_ACTION_REPORT) {
125
break;
126
} else {
127
- offset -= job->cluster_size;
128
+ offset -= s->cluster_size;
129
continue;
130
}
131
}
132
}
133
}
107
}
134
108
135
- notifier_with_return_remove(&job->before_write);
109
+ /*
136
+ notifier_with_return_remove(&s->before_write);
110
+ * if the kernel does not report usable IOVA regions, choose
137
111
+ * the legacy [QEMU_VFIO_IOVA_MIN, QEMU_VFIO_IOVA_MAX -1] region
138
/* wait until pending backup_do_cow() calls have completed */
112
+ */
139
- qemu_co_rwlock_wrlock(&job->flush_rwlock);
113
+ s->nb_iova_ranges = 1;
140
- qemu_co_rwlock_unlock(&job->flush_rwlock);
114
+ s->usable_iova_ranges = g_new0(struct IOVARange, 1);
141
- hbitmap_free(job->copy_bitmap);
115
+ s->usable_iova_ranges[0].start = QEMU_VFIO_IOVA_MIN;
142
+ qemu_co_rwlock_wrlock(&s->flush_rwlock);
116
+ s->usable_iova_ranges[0].end = QEMU_VFIO_IOVA_MAX - 1;
143
+ qemu_co_rwlock_unlock(&s->flush_rwlock);
117
+
144
+ hbitmap_free(s->copy_bitmap);
118
+ if (iommu_info->argsz > iommu_info_size) {
145
119
+ iommu_info_size = iommu_info->argsz;
146
return ret;
120
+ iommu_info = g_realloc(iommu_info, iommu_info_size);
147
}
121
+ if (ioctl(s->container, VFIO_IOMMU_GET_INFO, iommu_info)) {
122
+ ret = -errno;
123
+ goto fail;
124
+ }
125
+ collect_usable_iova_ranges(s, iommu_info);
126
+ }
127
+
128
s->device = ioctl(s->group, VFIO_GROUP_GET_DEVICE_FD, device);
129
130
if (s->device < 0) {
131
@@ -XXX,XX +XXX,XX @@ static int qemu_vfio_init_pci(QEMUVFIOState *s, const char *device,
132
if (ret) {
133
goto fail;
134
}
135
+ g_free(iommu_info);
136
return 0;
137
fail:
138
+ g_free(s->usable_iova_ranges);
139
+ s->usable_iova_ranges = NULL;
140
+ s->nb_iova_ranges = 0;
141
+ g_free(iommu_info);
142
close(s->group);
143
fail_container:
144
close(s->container);
145
@@ -XXX,XX +XXX,XX @@ void qemu_vfio_close(QEMUVFIOState *s)
146
qemu_vfio_undo_mapping(s, &s->mappings[i], NULL);
147
}
148
ram_block_notifier_remove(&s->ram_notifier);
149
+ g_free(s->usable_iova_ranges);
150
+ s->nb_iova_ranges = 0;
151
qemu_vfio_reset(s);
152
close(s->device);
153
close(s->group);
148
--
154
--
149
2.17.1
155
2.26.2
150
156
151
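
A hedged sketch of how the collected ranges can be consumed inside util/vfio-helpers.c (iova_is_usable() is a hypothetical helper for illustration only; the actual consumer is the allocator rework in the next patch):

static bool iova_is_usable(QEMUVFIOState *s, uint64_t iova, size_t size)
{
    int i;

    /* size is assumed to be non-zero */
    for (i = 0; i < s->nb_iova_ranges; i++) {
        if (iova >= s->usable_iova_ranges[i].start &&
            iova + size - 1 <= s->usable_iova_ranges[i].end) {
            return true;
        }
    }
    return false;
}

On the ARM example from the commit message, such a check would reject any candidate IOVA overlapping the MSI window [0x8000000, 0x8100000].
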
1
From: John Snow <jsnow@redhat.com>
1
From: Eric Auger <eric.auger@redhat.com>
2
2
3
Change the manual deferment to mirror_exit into the implicit
3
Introduce the qemu_vfio_find_fixed/temp_iova helpers which
4
callback to job_exit and the mirror_exit callback.
4
respectively allocate IOVAs from the bottom/top parts of the
5
usable IOVA range, without picking within host IOVA reserved
6
windows. The allocation remains basic: if the size is too big
7
for the remainder of the current usable IOVA range, we jump
8
to the next one, leaving a hole in the address map.
5
9
6
This does change the order of some bdrv_unref calls and job_completed,
10
Signed-off-by: Eric Auger <eric.auger@redhat.com>
7
but thanks to the new context in which we call .exit, it is safe to
11
Message-id: 20200929085550.30926-3-eric.auger@redhat.com
8
defer the possible flushing of any nodes to the job_finalize_single
12
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
9
cleanup stage.
13
---
14
util/vfio-helpers.c | 57 +++++++++++++++++++++++++++++++++++++++++----
15
1 file changed, 53 insertions(+), 4 deletions(-)
10
16
11
Signed-off-by: John Snow <jsnow@redhat.com>
17
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
12
Message-id: 20180830015734.19765-6-jsnow@redhat.com
13
Reviewed-by: Max Reitz <mreitz@redhat.com>
14
Reviewed-by: Jeff Cody <jcody@redhat.com>
15
Signed-off-by: Max Reitz <mreitz@redhat.com>
16
---
17
block/mirror.c | 29 +++++++++++------------------
18
1 file changed, 11 insertions(+), 18 deletions(-)
19
20
diff --git a/block/mirror.c b/block/mirror.c
21
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
22
--- a/block/mirror.c
19
--- a/util/vfio-helpers.c
23
+++ b/block/mirror.c
20
+++ b/util/vfio-helpers.c
24
@@ -XXX,XX +XXX,XX @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
21
@@ -XXX,XX +XXX,XX @@ static bool qemu_vfio_verify_mappings(QEMUVFIOState *s)
25
}
22
return true;
26
}
23
}
27
24
28
-typedef struct {
25
+static int
29
- int ret;
26
+qemu_vfio_find_fixed_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
30
-} MirrorExitData;
27
+{
31
-
28
+ int i;
32
-static void mirror_exit(Job *job, void *opaque)
29
+
33
+static void mirror_exit(Job *job)
30
+ for (i = 0; i < s->nb_iova_ranges; i++) {
34
{
31
+ if (s->usable_iova_ranges[i].end < s->low_water_mark) {
35
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
32
+ continue;
36
BlockJob *bjob = &s->common;
33
+ }
37
- MirrorExitData *data = opaque;
34
+ s->low_water_mark =
38
MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
35
+ MAX(s->low_water_mark, s->usable_iova_ranges[i].start);
39
AioContext *replace_aio_context = NULL;
36
+
40
BlockDriverState *src = s->mirror_top_bs->backing->bs;
37
+ if (s->usable_iova_ranges[i].end - s->low_water_mark + 1 >= size ||
41
BlockDriverState *target_bs = blk_bs(s->target);
38
+ s->usable_iova_ranges[i].end - s->low_water_mark + 1 == 0) {
42
BlockDriverState *mirror_top_bs = s->mirror_top_bs;
39
+ *iova = s->low_water_mark;
43
Error *local_err = NULL;
40
+ s->low_water_mark += size;
44
+ int ret = job->ret;
41
+ return 0;
45
42
+ }
46
bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
43
+ }
47
44
+ return -ENOMEM;
48
- /* Make sure that the source BDS doesn't go away before we called
45
+}
49
- * job_completed(). */
46
+
50
+ /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
47
+static int
51
+ * before we can call bdrv_drained_end */
48
+qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
52
bdrv_ref(src);
49
+{
53
bdrv_ref(mirror_top_bs);
50
+ int i;
54
bdrv_ref(target_bs);
51
+
55
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
52
+ for (i = s->nb_iova_ranges - 1; i >= 0; i--) {
56
bdrv_set_backing_hd(target_bs, backing, &local_err);
53
+ if (s->usable_iova_ranges[i].start > s->high_water_mark) {
57
if (local_err) {
54
+ continue;
58
error_report_err(local_err);
55
+ }
59
- data->ret = -EPERM;
56
+ s->high_water_mark =
60
+ ret = -EPERM;
57
+ MIN(s->high_water_mark, s->usable_iova_ranges[i].end + 1);
58
+
59
+ if (s->high_water_mark - s->usable_iova_ranges[i].start + 1 >= size ||
60
+ s->high_water_mark - s->usable_iova_ranges[i].start + 1 == 0) {
61
+ *iova = s->high_water_mark - size;
62
+ s->high_water_mark = *iova;
63
+ return 0;
64
+ }
65
+ }
66
+ return -ENOMEM;
67
+}
68
+
69
/* Map [host, host + size) area into a contiguous IOVA address space, and store
70
* the result in @iova if not NULL. The caller need to make sure the area is
71
* aligned to page size, and mustn't overlap with existing mapping areas (split
72
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
73
goto out;
74
}
75
if (!temporary) {
76
- iova0 = s->low_water_mark;
77
+ if (qemu_vfio_find_fixed_iova(s, size, &iova0)) {
78
+ ret = -ENOMEM;
79
+ goto out;
80
+ }
81
+
82
mapping = qemu_vfio_add_mapping(s, host, size, index + 1, iova0);
83
if (!mapping) {
84
ret = -ENOMEM;
85
@@ -XXX,XX +XXX,XX @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
86
qemu_vfio_undo_mapping(s, mapping, NULL);
87
goto out;
61
}
88
}
89
- s->low_water_mark += size;
90
qemu_vfio_dump_mappings(s);
91
} else {
92
- iova0 = s->high_water_mark - size;
93
+ if (qemu_vfio_find_temp_iova(s, size, &iova0)) {
94
+ ret = -ENOMEM;
95
+ goto out;
96
+ }
97
ret = qemu_vfio_do_mapping(s, host, size, iova0);
98
if (ret) {
99
goto out;
100
}
101
- s->high_water_mark -= size;
62
}
102
}
63
}
103
}
64
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
104
if (iova) {
65
aio_context_acquire(replace_aio_context);
66
}
67
68
- if (s->should_complete && data->ret == 0) {
69
+ if (s->should_complete && ret == 0) {
70
BlockDriverState *to_replace = src;
71
if (s->to_replace) {
72
to_replace = s->to_replace;
73
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
74
bdrv_drained_end(target_bs);
75
if (local_err) {
76
error_report_err(local_err);
77
- data->ret = -EPERM;
78
+ ret = -EPERM;
79
}
80
}
81
if (s->to_replace) {
82
@@ -XXX,XX +XXX,XX @@ static void mirror_exit(Job *job, void *opaque)
83
blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
84
85
bs_opaque->job = NULL;
86
- job_completed(job, data->ret);
87
88
- g_free(data);
89
bdrv_drained_end(src);
90
bdrv_unref(mirror_top_bs);
91
bdrv_unref(src);
92
+
93
+ job->ret = ret;
94
}
95
96
static void mirror_throttle(MirrorBlockJob *s)
97
@@ -XXX,XX +XXX,XX @@ static int mirror_flush(MirrorBlockJob *s)
98
static int coroutine_fn mirror_run(Job *job, Error **errp)
99
{
100
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
101
- MirrorExitData *data;
102
BlockDriverState *bs = s->mirror_top_bs->backing->bs;
103
BlockDriverState *target_bs = blk_bs(s->target);
104
bool need_drain = true;
105
@@ -XXX,XX +XXX,XX @@ immediate_exit:
106
g_free(s->in_flight_bitmap);
107
bdrv_dirty_iter_free(s->dbi);
108
109
- data = g_malloc(sizeof(*data));
110
- data->ret = ret;
111
-
112
if (need_drain) {
113
bdrv_drained_begin(bs);
114
}
115
116
- job_defer_to_main_loop(&s->common.job, mirror_exit, data);
117
return ret;
118
}
119
120
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver mirror_job_driver = {
121
.user_resume = block_job_user_resume,
122
.drain = block_job_drain,
123
.run = mirror_run,
124
+ .exit = mirror_exit,
125
.pause = mirror_pause,
126
.complete = mirror_complete,
127
},
128
@@ -XXX,XX +XXX,XX @@ static const BlockJobDriver commit_active_job_driver = {
129
.user_resume = block_job_user_resume,
130
.drain = block_job_drain,
131
.run = mirror_run,
132
+ .exit = mirror_exit,
133
.pause = mirror_pause,
134
.complete = mirror_complete,
135
},
136
--
105
--
137
2.17.1
106
2.26.2
138
107
139
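
A worked example of the new behaviour, using made-up numbers: suppose the usable ranges are [0x10000, 0x7ffffff] and [0x8100000, 0xffffffffff], and low_water_mark currently sits at 0x7ff0000. A fixed (permanent) allocation of 0x200000 bytes no longer fits in the first range (only 0x10000 bytes remain below 0x7ffffff), so qemu_vfio_find_fixed_iova() moves to the next range, raises low_water_mark to 0x8100000, returns that as the IOVA and advances the mark to 0x8300000, leaving the tail of the first range unused (the "hole in the address map" mentioned in the commit message). Temporary allocations behave symmetrically from the top of the usable space via qemu_vfio_find_temp_iova().
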