1 | The following changes since commit 91f5f7a5df1fda8c34677a7c49ee8a4bb5b56a36: | 1 | The following changes since commit 05d50ba2d4668d43a835c5a502efdec9b92646e6: |
---|---|---|---|
2 | 2 | ||
3 | Merge remote-tracking branch 'remotes/lvivier-gitlab/tags/linux-user-for-7.0-pull-request' into staging (2022-01-12 11:51:47 +0000) | 3 | Merge tag 'migration-20230427-pull-request' of https://gitlab.com/juan.quintela/qemu into staging (2023-04-28 08:35:06 +0100) |
4 | 4 | ||
5 | are available in the Git repository at: | 5 | are available in the Git repository at: |
6 | 6 | ||
7 | https://gitlab.com/stefanha/qemu.git tags/block-pull-request | 7 | https://gitlab.com/stefanha/qemu.git tags/block-pull-request |
8 | 8 | ||
9 | for you to fetch changes up to db608fb78444c58896db69495729e4458eeaace1: | 9 | for you to fetch changes up to d3c760be786571d83d5cea01953e543df4d76f51: |
10 | 10 | ||
11 | virtio: unify dataplane and non-dataplane ->handle_output() (2022-01-12 17:09:39 +0000) | 11 | docs/zoned-storage:add zoned emulation use case (2023-04-28 08:34:07 -0400) |
12 | 12 | ||
13 | ---------------------------------------------------------------- | 13 | ---------------------------------------------------------------- |
14 | Pull request | 14 | Pull request |
15 | 15 | ||
16 | This pull request contains Sam Li's virtio-blk zoned storage work. These | ||
17 | patches were dropped from my previous block pull request due to CI failures. | ||
18 | |||
16 | ---------------------------------------------------------------- | 19 | ---------------------------------------------------------------- |
17 | 20 | ||
18 | Stefan Hajnoczi (6): | 21 | Sam Li (17): |
19 | aio-posix: split poll check from ready handler | 22 | block/block-common: add zoned device structs |
20 | virtio: get rid of VirtIOHandleAIOOutput | 23 | block/file-posix: introduce helper functions for sysfs attributes |
21 | virtio-blk: drop unused virtio_blk_handle_vq() return value | 24 | block/block-backend: add block layer APIs resembling Linux |
22 | virtio-scsi: prepare virtio_scsi_handle_cmd for dataplane | 25 | ZonedBlockDevice ioctls |
23 | virtio: use ->handle_output() instead of ->handle_aio_output() | 26 | block/raw-format: add zone operations to pass through requests |
24 | virtio: unify dataplane and non-dataplane ->handle_output() | 27 | block: add zoned BlockDriver check to block layer |
28 | iotests: test new zone operations | ||
29 | block: add some trace events for new block layer APIs | ||
30 | docs/zoned-storage: add zoned device documentation | ||
31 | file-posix: add tracking of the zone write pointers | ||
32 | block: introduce zone append write for zoned devices | ||
33 | qemu-iotests: test zone append operation | ||
34 | block: add some trace events for zone append | ||
35 | include: update virtio_blk headers to v6.3-rc1 | ||
36 | virtio-blk: add zoned storage emulation for zoned devices | ||
37 | block: add accounting for zone append operation | ||
38 | virtio-blk: add some trace events for zoned emulation | ||
39 | docs/zoned-storage:add zoned emulation use case | ||
25 | 40 | ||
26 | include/block/aio.h | 4 +- | 41 | docs/devel/index-api.rst | 1 + |
27 | include/hw/virtio/virtio-blk.h | 2 +- | 42 | docs/devel/zoned-storage.rst | 62 ++ |
28 | include/hw/virtio/virtio.h | 5 +- | 43 | qapi/block-core.json | 68 +- |
29 | util/aio-posix.h | 1 + | 44 | qapi/block.json | 4 + |
30 | block/curl.c | 11 ++-- | 45 | meson.build | 4 + |
31 | block/export/fuse.c | 4 +- | 46 | include/block/accounting.h | 1 + |
32 | block/io_uring.c | 19 ++++--- | 47 | include/block/block-common.h | 57 ++ |
33 | block/iscsi.c | 4 +- | 48 | include/block/block-io.h | 13 + |
34 | block/linux-aio.c | 16 +++--- | 49 | include/block/block_int-common.h | 37 + |
35 | block/nfs.c | 6 +-- | 50 | include/block/raw-aio.h | 8 +- |
36 | block/nvme.c | 51 ++++++++++++------- | 51 | include/standard-headers/drm/drm_fourcc.h | 12 + |
37 | block/ssh.c | 4 +- | 52 | include/standard-headers/linux/ethtool.h | 48 +- |
38 | block/win32-aio.c | 4 +- | 53 | include/standard-headers/linux/fuse.h | 45 +- |
39 | hw/block/dataplane/virtio-blk.c | 16 +----- | 54 | include/standard-headers/linux/pci_regs.h | 1 + |
40 | hw/block/virtio-blk.c | 14 ++---- | 55 | include/standard-headers/linux/vhost_types.h | 2 + |
41 | hw/scsi/virtio-scsi-dataplane.c | 60 +++------------------- | 56 | include/standard-headers/linux/virtio_blk.h | 105 +++ |
42 | hw/scsi/virtio-scsi.c | 2 +- | 57 | include/sysemu/block-backend-io.h | 27 + |
43 | hw/virtio/virtio.c | 73 +++++++++------------------ | 58 | linux-headers/asm-arm64/kvm.h | 1 + |
44 | hw/xen/xen-bus.c | 6 +-- | 59 | linux-headers/asm-x86/kvm.h | 34 +- |
45 | io/channel-command.c | 6 ++- | 60 | linux-headers/linux/kvm.h | 9 + |
46 | io/channel-file.c | 3 +- | 61 | linux-headers/linux/vfio.h | 15 +- |
47 | io/channel-socket.c | 3 +- | 62 | linux-headers/linux/vhost.h | 8 + |
48 | migration/rdma.c | 8 +-- | 63 | block.c | 19 + |
49 | tests/unit/test-aio.c | 4 +- | 64 | block/block-backend.c | 198 ++++++ |
50 | tests/unit/test-fdmon-epoll.c | 4 +- | 65 | block/file-posix.c | 696 +++++++++++++++++-- |
51 | util/aio-posix.c | 89 +++++++++++++++++++++++++-------- | 66 | block/io.c | 68 ++ |
52 | util/aio-win32.c | 4 +- | 67 | block/io_uring.c | 4 + |
53 | util/async.c | 10 +++- | 68 | block/linux-aio.c | 3 + |
54 | util/main-loop.c | 4 +- | 69 | block/qapi-sysemu.c | 11 + |
55 | util/qemu-coroutine-io.c | 5 +- | 70 | block/qapi.c | 18 + |
56 | util/vhost-user-server.c | 11 ++-- | 71 | block/raw-format.c | 26 + |
57 | 31 files changed, 221 insertions(+), 232 deletions(-) | 72 | hw/block/virtio-blk-common.c | 2 + |
73 | hw/block/virtio-blk.c | 405 +++++++++++ | ||
74 | hw/virtio/virtio-qmp.c | 2 + | ||
75 | qemu-io-cmds.c | 224 ++++++ | ||
76 | block/trace-events | 4 + | ||
77 | docs/system/qemu-block-drivers.rst.inc | 6 + | ||
78 | hw/block/trace-events | 7 + | ||
79 | tests/qemu-iotests/tests/zoned | 105 +++ | ||
80 | tests/qemu-iotests/tests/zoned.out | 69 ++ | ||
81 | 40 files changed, 2361 insertions(+), 68 deletions(-) | ||
82 | create mode 100644 docs/devel/zoned-storage.rst | ||
83 | create mode 100755 tests/qemu-iotests/tests/zoned | ||
84 | create mode 100644 tests/qemu-iotests/tests/zoned.out | ||
58 | 85 | ||
59 | -- | 86 | -- |
60 | 2.34.1 | 87 | 2.40.0 |
61 | |||
62 | |||
63 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
4 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
5 | Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com> | ||
6 | Reviewed-by: Hannes Reinecke <hare@suse.de> | ||
7 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
8 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Message-id: 20230427172019.3345-2-faithilikerun@gmail.com | ||
11 | Message-id: 20230324090605.28361-2-faithilikerun@gmail.com | ||
12 | [Adjust commit message prefix as suggested by Philippe Mathieu-Daudé | ||
13 | <philmd@linaro.org>. | ||
14 | --Stefan] | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
16 | --- | ||
17 | include/block/block-common.h | 43 ++++++++++++++++++++++++++++++++++++ | ||
18 | 1 file changed, 43 insertions(+) | ||
19 | |||
20 | diff --git a/include/block/block-common.h b/include/block/block-common.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/include/block/block-common.h | ||
23 | +++ b/include/block/block-common.h | ||
24 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockDriver BlockDriver; | ||
25 | typedef struct BdrvChild BdrvChild; | ||
26 | typedef struct BdrvChildClass BdrvChildClass; | ||
27 | |||
28 | +typedef enum BlockZoneOp { | ||
29 | + BLK_ZO_OPEN, | ||
30 | + BLK_ZO_CLOSE, | ||
31 | + BLK_ZO_FINISH, | ||
32 | + BLK_ZO_RESET, | ||
33 | +} BlockZoneOp; | ||
34 | + | ||
35 | +typedef enum BlockZoneModel { | ||
36 | + BLK_Z_NONE = 0x0, /* Regular block device */ | ||
37 | + BLK_Z_HM = 0x1, /* Host-managed zoned block device */ | ||
38 | + BLK_Z_HA = 0x2, /* Host-aware zoned block device */ | ||
39 | +} BlockZoneModel; | ||
40 | + | ||
41 | +typedef enum BlockZoneState { | ||
42 | + BLK_ZS_NOT_WP = 0x0, | ||
43 | + BLK_ZS_EMPTY = 0x1, | ||
44 | + BLK_ZS_IOPEN = 0x2, | ||
45 | + BLK_ZS_EOPEN = 0x3, | ||
46 | + BLK_ZS_CLOSED = 0x4, | ||
47 | + BLK_ZS_RDONLY = 0xD, | ||
48 | + BLK_ZS_FULL = 0xE, | ||
49 | + BLK_ZS_OFFLINE = 0xF, | ||
50 | +} BlockZoneState; | ||
51 | + | ||
52 | +typedef enum BlockZoneType { | ||
53 | + BLK_ZT_CONV = 0x1, /* Conventional random writes supported */ | ||
54 | + BLK_ZT_SWR = 0x2, /* Sequential writes required */ | ||
55 | + BLK_ZT_SWP = 0x3, /* Sequential writes preferred */ | ||
56 | +} BlockZoneType; | ||
57 | + | ||
58 | +/* | ||
59 | + * Zone descriptor data structure. | ||
60 | + * Provides information on a zone with all position and size values in bytes. | ||
61 | + */ | ||
62 | +typedef struct BlockZoneDescriptor { | ||
63 | + uint64_t start; | ||
64 | + uint64_t length; | ||
65 | + uint64_t cap; | ||
66 | + uint64_t wp; | ||
67 | + BlockZoneType type; | ||
68 | + BlockZoneState state; | ||
69 | +} BlockZoneDescriptor; | ||
70 | + | ||
71 | typedef struct BlockDriverInfo { | ||
72 | /* in bytes, 0 if irrelevant */ | ||
73 | int cluster_size; | ||
74 | -- | ||
75 | 2.40.0 | ||
76 | |||
77 | diff view generated by jsdifflib |
1 | The virtqueue host notifier API | 1 | From: Sam Li <faithilikerun@gmail.com> |
---|---|---|---|
2 | virtio_queue_aio_set_host_notifier_handler() polls the virtqueue for new | 2 | |
3 | buffers. AioContext previously required a bool progress return value | 3 | Use get_sysfs_str_val() to get the string value of device |
4 | indicating whether an event was handled or not. This is no longer | 4 | zoned model. Then get_sysfs_zoned_model() can convert it to |
5 | necessary because the AioContext polling API has been split into a poll | 5 | BlockZoneModel type of QEMU. |
6 | check function and an event handler function. The event handler is only | 6 | |
7 | run when we know there is work to do, so it doesn't return bool. | 7 | Use get_sysfs_long_val() to get the long value of zoned device |
8 | 8 | information. | |
9 | The VirtIOHandleAIOOutput function signature is now the same as | 9 | |
10 | VirtIOHandleOutput. Get rid of the bool return value. | 10 | Signed-off-by: Sam Li <faithilikerun@gmail.com> |
11 | 11 | Reviewed-by: Hannes Reinecke <hare@suse.de> | |
12 | Further simplifications will be made for virtio-blk and virtio-scsi in | 12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
13 | the next patch. | 13 | Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com> |
14 | 14 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | |
15 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
16 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | 17 | Message-id: 20230427172019.3345-3-faithilikerun@gmail.com |
17 | Message-id: 20211207132336.36627-3-stefanha@redhat.com | 18 | Message-id: 20230324090605.28361-3-faithilikerun@gmail.com |
19 | [Adjust commit message prefix as suggested by Philippe Mathieu-Daudé | ||
20 | <philmd@linaro.org>. | ||
21 | --Stefan] | ||
18 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 22 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
19 | --- | 23 | --- |
20 | include/hw/virtio/virtio.h | 3 +-- | 24 | include/block/block_int-common.h | 3 + |
21 | hw/block/dataplane/virtio-blk.c | 4 ++-- | 25 | block/file-posix.c | 139 ++++++++++++++++++++++--------- |
22 | hw/scsi/virtio-scsi-dataplane.c | 18 ++++++------------ | 26 | 2 files changed, 104 insertions(+), 38 deletions(-) |
23 | hw/virtio/virtio.c | 12 ++++-------- | 27 | |
24 | 4 files changed, 13 insertions(+), 24 deletions(-) | 28 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h |
25 | |||
26 | diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h | ||
27 | index XXXXXXX..XXXXXXX 100644 | 29 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/include/hw/virtio/virtio.h | 30 | --- a/include/block/block_int-common.h |
29 | +++ b/include/hw/virtio/virtio.h | 31 | +++ b/include/block/block_int-common.h |
30 | @@ -XXX,XX +XXX,XX @@ void virtio_error(VirtIODevice *vdev, const char *fmt, ...) GCC_FMT_ATTR(2, 3); | 32 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockLimits { |
31 | void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name); | 33 | * an explicit monitor command to load the disk inside the guest). |
32 | 34 | */ | |
33 | typedef void (*VirtIOHandleOutput)(VirtIODevice *, VirtQueue *); | 35 | bool has_variable_length; |
34 | -typedef bool (*VirtIOHandleAIOOutput)(VirtIODevice *, VirtQueue *); | 36 | + |
35 | 37 | + /* device zone model */ | |
36 | VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, | 38 | + BlockZoneModel zoned; |
37 | VirtIOHandleOutput handle_output); | 39 | } BlockLimits; |
38 | @@ -XXX,XX +XXX,XX @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq); | 40 | |
39 | void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled); | 41 | typedef struct BdrvOpBlocker BdrvOpBlocker; |
40 | void virtio_queue_host_notifier_read(EventNotifier *n); | 42 | diff --git a/block/file-posix.c b/block/file-posix.c |
41 | void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | ||
42 | - VirtIOHandleAIOOutput handle_output); | ||
43 | + VirtIOHandleOutput handle_output); | ||
44 | VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector); | ||
45 | VirtQueue *virtio_vector_next_queue(VirtQueue *vq); | ||
46 | |||
47 | diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c | ||
48 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
49 | --- a/hw/block/dataplane/virtio-blk.c | 44 | --- a/block/file-posix.c |
50 | +++ b/hw/block/dataplane/virtio-blk.c | 45 | +++ b/block/file-posix.c |
51 | @@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) | 46 | @@ -XXX,XX +XXX,XX @@ static int hdev_get_max_hw_transfer(int fd, struct stat *st) |
52 | g_free(s); | 47 | #endif |
53 | } | 48 | } |
54 | 49 | ||
55 | -static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev, | 50 | -static int hdev_get_max_segments(int fd, struct stat *st) |
56 | +static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev, | 51 | +/* |
57 | VirtQueue *vq) | 52 | + * Get a sysfs attribute value as character string. |
53 | + */ | ||
54 | +static int get_sysfs_str_val(struct stat *st, const char *attribute, | ||
55 | + char **val) { | ||
56 | +#ifdef CONFIG_LINUX | ||
57 | + g_autofree char *sysfspath = NULL; | ||
58 | + int ret; | ||
59 | + size_t len; | ||
60 | + | ||
61 | + if (!S_ISBLK(st->st_mode)) { | ||
62 | + return -ENOTSUP; | ||
63 | + } | ||
64 | + | ||
65 | + sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s", | ||
66 | + major(st->st_rdev), minor(st->st_rdev), | ||
67 | + attribute); | ||
68 | + ret = g_file_get_contents(sysfspath, val, &len, NULL); | ||
69 | + if (ret == -1) { | ||
70 | + return -ENOENT; | ||
71 | + } | ||
72 | + | ||
73 | + /* The file is ended with '\n' */ | ||
74 | + char *p; | ||
75 | + p = *val; | ||
76 | + if (*(p + len - 1) == '\n') { | ||
77 | + *(p + len - 1) = '\0'; | ||
78 | + } | ||
79 | + return ret; | ||
80 | +#else | ||
81 | + return -ENOTSUP; | ||
82 | +#endif | ||
83 | +} | ||
84 | + | ||
85 | +static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned) | ||
86 | +{ | ||
87 | + g_autofree char *val = NULL; | ||
88 | + int ret; | ||
89 | + | ||
90 | + ret = get_sysfs_str_val(st, "zoned", &val); | ||
91 | + if (ret < 0) { | ||
92 | + return ret; | ||
93 | + } | ||
94 | + | ||
95 | + if (strcmp(val, "host-managed") == 0) { | ||
96 | + *zoned = BLK_Z_HM; | ||
97 | + } else if (strcmp(val, "host-aware") == 0) { | ||
98 | + *zoned = BLK_Z_HA; | ||
99 | + } else if (strcmp(val, "none") == 0) { | ||
100 | + *zoned = BLK_Z_NONE; | ||
101 | + } else { | ||
102 | + return -ENOTSUP; | ||
103 | + } | ||
104 | + return 0; | ||
105 | +} | ||
106 | + | ||
107 | +/* | ||
108 | + * Get a sysfs attribute value as a long integer. | ||
109 | + */ | ||
110 | +static long get_sysfs_long_val(struct stat *st, const char *attribute) | ||
58 | { | 111 | { |
59 | VirtIOBlock *s = (VirtIOBlock *)vdev; | 112 | #ifdef CONFIG_LINUX |
60 | @@ -XXX,XX +XXX,XX @@ static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev, | 113 | - char buf[32]; |
61 | assert(s->dataplane); | 114 | + g_autofree char *str = NULL; |
62 | assert(s->dataplane_started); | 115 | const char *end; |
63 | 116 | - char *sysfspath = NULL; | |
64 | - return virtio_blk_handle_vq(s, vq); | 117 | + long val; |
65 | + virtio_blk_handle_vq(s, vq); | 118 | + int ret; |
119 | + | ||
120 | + ret = get_sysfs_str_val(st, attribute, &str); | ||
121 | + if (ret < 0) { | ||
122 | + return ret; | ||
123 | + } | ||
124 | + | ||
125 | + /* The file is ended with '\n', pass 'end' to accept that. */ | ||
126 | + ret = qemu_strtol(str, &end, 10, &val); | ||
127 | + if (ret == 0 && end && *end == '\0') { | ||
128 | + ret = val; | ||
129 | + } | ||
130 | + return ret; | ||
131 | +#else | ||
132 | + return -ENOTSUP; | ||
133 | +#endif | ||
134 | +} | ||
135 | + | ||
136 | +static int hdev_get_max_segments(int fd, struct stat *st) | ||
137 | +{ | ||
138 | +#ifdef CONFIG_LINUX | ||
139 | int ret; | ||
140 | - int sysfd = -1; | ||
141 | - long max_segments; | ||
142 | |||
143 | if (S_ISCHR(st->st_mode)) { | ||
144 | if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) { | ||
145 | @@ -XXX,XX +XXX,XX @@ static int hdev_get_max_segments(int fd, struct stat *st) | ||
146 | } | ||
147 | return -ENOTSUP; | ||
148 | } | ||
149 | - | ||
150 | - if (!S_ISBLK(st->st_mode)) { | ||
151 | - return -ENOTSUP; | ||
152 | - } | ||
153 | - | ||
154 | - sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments", | ||
155 | - major(st->st_rdev), minor(st->st_rdev)); | ||
156 | - sysfd = open(sysfspath, O_RDONLY); | ||
157 | - if (sysfd == -1) { | ||
158 | - ret = -errno; | ||
159 | - goto out; | ||
160 | - } | ||
161 | - ret = RETRY_ON_EINTR(read(sysfd, buf, sizeof(buf) - 1)); | ||
162 | - if (ret < 0) { | ||
163 | - ret = -errno; | ||
164 | - goto out; | ||
165 | - } else if (ret == 0) { | ||
166 | - ret = -EIO; | ||
167 | - goto out; | ||
168 | - } | ||
169 | - buf[ret] = 0; | ||
170 | - /* The file is ended with '\n', pass 'end' to accept that. */ | ||
171 | - ret = qemu_strtol(buf, &end, 10, &max_segments); | ||
172 | - if (ret == 0 && end && *end == '\n') { | ||
173 | - ret = max_segments; | ||
174 | - } | ||
175 | - | ||
176 | -out: | ||
177 | - if (sysfd != -1) { | ||
178 | - close(sysfd); | ||
179 | - } | ||
180 | - g_free(sysfspath); | ||
181 | - return ret; | ||
182 | + return get_sysfs_long_val(st, "max_segments"); | ||
183 | #else | ||
184 | return -ENOTSUP; | ||
185 | #endif | ||
66 | } | 186 | } |
67 | 187 | ||
68 | /* Context: QEMU global mutex held */ | 188 | +static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, |
69 | diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c | 189 | + Error **errp) |
70 | index XXXXXXX..XXXXXXX 100644 | 190 | +{ |
71 | --- a/hw/scsi/virtio-scsi-dataplane.c | 191 | + BlockZoneModel zoned; |
72 | +++ b/hw/scsi/virtio-scsi-dataplane.c | 192 | + int ret; |
73 | @@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) | 193 | + |
74 | } | 194 | + bs->bl.zoned = BLK_Z_NONE; |
75 | } | 195 | + |
76 | 196 | + ret = get_sysfs_zoned_model(st, &zoned); | |
77 | -static bool virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, | 197 | + if (ret < 0 || zoned == BLK_Z_NONE) { |
78 | +static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, | 198 | + return; |
79 | VirtQueue *vq) | 199 | + } |
200 | + bs->bl.zoned = zoned; | ||
201 | +} | ||
202 | + | ||
203 | static void raw_refresh_limits(BlockDriverState *bs, Error **errp) | ||
80 | { | 204 | { |
81 | - bool progress = false; | 205 | BDRVRawState *s = bs->opaque; |
82 | VirtIOSCSI *s = VIRTIO_SCSI(vdev); | 206 | @@ -XXX,XX +XXX,XX @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) |
83 | 207 | bs->bl.max_hw_iov = ret; | |
84 | virtio_scsi_acquire(s); | ||
85 | if (!s->dataplane_fenced) { | ||
86 | assert(s->ctx && s->dataplane_started); | ||
87 | - progress = virtio_scsi_handle_cmd_vq(s, vq); | ||
88 | + virtio_scsi_handle_cmd_vq(s, vq); | ||
89 | } | ||
90 | virtio_scsi_release(s); | ||
91 | - return progress; | ||
92 | } | ||
93 | |||
94 | -static bool virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev, | ||
95 | +static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev, | ||
96 | VirtQueue *vq) | ||
97 | { | ||
98 | - bool progress = false; | ||
99 | VirtIOSCSI *s = VIRTIO_SCSI(vdev); | ||
100 | |||
101 | virtio_scsi_acquire(s); | ||
102 | if (!s->dataplane_fenced) { | ||
103 | assert(s->ctx && s->dataplane_started); | ||
104 | - progress = virtio_scsi_handle_ctrl_vq(s, vq); | ||
105 | + virtio_scsi_handle_ctrl_vq(s, vq); | ||
106 | } | ||
107 | virtio_scsi_release(s); | ||
108 | - return progress; | ||
109 | } | ||
110 | |||
111 | -static bool virtio_scsi_data_plane_handle_event(VirtIODevice *vdev, | ||
112 | +static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev, | ||
113 | VirtQueue *vq) | ||
114 | { | ||
115 | - bool progress = false; | ||
116 | VirtIOSCSI *s = VIRTIO_SCSI(vdev); | ||
117 | |||
118 | virtio_scsi_acquire(s); | ||
119 | if (!s->dataplane_fenced) { | ||
120 | assert(s->ctx && s->dataplane_started); | ||
121 | - progress = virtio_scsi_handle_event_vq(s, vq); | ||
122 | + virtio_scsi_handle_event_vq(s, vq); | ||
123 | } | ||
124 | virtio_scsi_release(s); | ||
125 | - return progress; | ||
126 | } | ||
127 | |||
128 | static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n) | ||
129 | diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c | ||
130 | index XXXXXXX..XXXXXXX 100644 | ||
131 | --- a/hw/virtio/virtio.c | ||
132 | +++ b/hw/virtio/virtio.c | ||
133 | @@ -XXX,XX +XXX,XX @@ struct VirtQueue | ||
134 | |||
135 | uint16_t vector; | ||
136 | VirtIOHandleOutput handle_output; | ||
137 | - VirtIOHandleAIOOutput handle_aio_output; | ||
138 | + VirtIOHandleOutput handle_aio_output; | ||
139 | VirtIODevice *vdev; | ||
140 | EventNotifier guest_notifier; | ||
141 | EventNotifier host_notifier; | ||
142 | @@ -XXX,XX +XXX,XX @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) | ||
143 | } | ||
144 | } | ||
145 | |||
146 | -static bool virtio_queue_notify_aio_vq(VirtQueue *vq) | ||
147 | +static void virtio_queue_notify_aio_vq(VirtQueue *vq) | ||
148 | { | ||
149 | - bool ret = false; | ||
150 | - | ||
151 | if (vq->vring.desc && vq->handle_aio_output) { | ||
152 | VirtIODevice *vdev = vq->vdev; | ||
153 | |||
154 | trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); | ||
155 | - ret = vq->handle_aio_output(vdev, vq); | ||
156 | + vq->handle_aio_output(vdev, vq); | ||
157 | |||
158 | if (unlikely(vdev->start_on_kick)) { | ||
159 | virtio_set_started(vdev, true); | ||
160 | } | 208 | } |
161 | } | 209 | } |
162 | - | 210 | + |
163 | - return ret; | 211 | + raw_refresh_zoned_limits(bs, &st, errp); |
164 | } | 212 | } |
165 | 213 | ||
166 | static void virtio_queue_notify_vq(VirtQueue *vq) | 214 | static int check_for_dasd(int fd) |
167 | @@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) | ||
168 | } | ||
169 | |||
170 | void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | ||
171 | - VirtIOHandleAIOOutput handle_output) | ||
172 | + VirtIOHandleOutput handle_output) | ||
173 | { | ||
174 | if (handle_output) { | ||
175 | vq->handle_aio_output = handle_output; | ||
176 | -- | 215 | -- |
177 | 2.34.1 | 216 | 2.40.0 |
178 | 217 | ||
179 | 218 | diff view generated by jsdifflib |
1 | Now that virtio-blk and virtio-scsi are ready, get rid of | 1 | From: Sam Li <faithilikerun@gmail.com> |
---|---|---|---|
2 | the handle_aio_output() callback. It's no longer needed. | ||
3 | 2 | ||
3 | Add zoned device option to host_device BlockDriver. It will be presented only | ||
4 | for zoned host block devices. By adding zone management operations to the | ||
5 | host_block_device BlockDriver, users can use the new block layer APIs | ||
6 | including Report Zone and four zone management operations | ||
7 | (open, close, finish, reset, reset_all). | ||
8 | |||
9 | Qemu-io uses the new APIs to perform zoned storage commands of the device: | ||
10 | zone_report(zrp), zone_open(zo), zone_close(zc), zone_reset(zrs), | ||
11 | zone_finish(zf). | ||
12 | |||
13 | For example, to test zone_report, use following command: | ||
14 | $ ./build/qemu-io --image-opts -n driver=host_device, filename=/dev/nullb0 | ||
15 | -c "zrp offset nr_zones" | ||
16 | |||
17 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
18 | Reviewed-by: Hannes Reinecke <hare@suse.de> | ||
19 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
20 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
21 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
4 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 22 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
5 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | 23 | Message-id: 20230427172019.3345-4-faithilikerun@gmail.com |
6 | Message-id: 20211207132336.36627-7-stefanha@redhat.com | 24 | Message-id: 20230324090605.28361-4-faithilikerun@gmail.com |
25 | [Adjust commit message prefix as suggested by Philippe Mathieu-Daudé | ||
26 | <philmd@linaro.org> and remove spurious ret = -errno in | ||
27 | raw_co_zone_mgmt(). | ||
28 | --Stefan] | ||
7 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 29 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
8 | --- | 30 | --- |
9 | include/hw/virtio/virtio.h | 4 +-- | 31 | meson.build | 4 + |
10 | hw/block/dataplane/virtio-blk.c | 16 ++-------- | 32 | include/block/block-io.h | 9 + |
11 | hw/scsi/virtio-scsi-dataplane.c | 54 ++++----------------------------- | 33 | include/block/block_int-common.h | 21 ++ |
12 | hw/virtio/virtio.c | 32 +++++++++---------- | 34 | include/block/raw-aio.h | 6 +- |
13 | 4 files changed, 26 insertions(+), 80 deletions(-) | 35 | include/sysemu/block-backend-io.h | 18 ++ |
36 | block/block-backend.c | 137 +++++++++++++ | ||
37 | block/file-posix.c | 313 +++++++++++++++++++++++++++++- | ||
38 | block/io.c | 41 ++++ | ||
39 | qemu-io-cmds.c | 149 ++++++++++++++ | ||
40 | 9 files changed, 695 insertions(+), 3 deletions(-) | ||
14 | 41 | ||
15 | diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h | 42 | diff --git a/meson.build b/meson.build |
16 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/hw/virtio/virtio.h | 44 | --- a/meson.build |
18 | +++ b/include/hw/virtio/virtio.h | 45 | +++ b/meson.build |
19 | @@ -XXX,XX +XXX,XX @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev); | 46 | @@ -XXX,XX +XXX,XX @@ config_host_data.set('CONFIG_REPLICATION', get_option('replication').allowed()) |
20 | EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq); | 47 | # has_header |
21 | void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled); | 48 | config_host_data.set('CONFIG_EPOLL', cc.has_header('sys/epoll.h')) |
22 | void virtio_queue_host_notifier_read(EventNotifier *n); | 49 | config_host_data.set('CONFIG_LINUX_MAGIC_H', cc.has_header('linux/magic.h')) |
23 | -void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | 50 | +config_host_data.set('CONFIG_BLKZONED', cc.has_header('linux/blkzoned.h')) |
24 | - VirtIOHandleOutput handle_output); | 51 | config_host_data.set('CONFIG_VALGRIND_H', cc.has_header('valgrind/valgrind.h')) |
25 | +void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx); | 52 | config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h')) |
26 | +void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx); | 53 | config_host_data.set('HAVE_DRM_H', cc.has_header('libdrm/drm.h')) |
27 | VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector); | 54 | @@ -XXX,XX +XXX,XX @@ config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID', |
28 | VirtQueue *virtio_vector_next_queue(VirtQueue *vq); | 55 | config_host_data.set('HAVE_STRUCT_STAT_ST_ATIM', |
29 | 56 | cc.has_member('struct stat', 'st_atim', | |
30 | diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c | 57 | prefix: '#include <sys/stat.h>')) |
58 | +config_host_data.set('HAVE_BLK_ZONE_REP_CAPACITY', | ||
59 | + cc.has_member('struct blk_zone', 'capacity', | ||
60 | + prefix: '#include <linux/blkzoned.h>')) | ||
61 | |||
62 | # has_type | ||
63 | config_host_data.set('CONFIG_IOVEC', | ||
64 | diff --git a/include/block/block-io.h b/include/block/block-io.h | ||
31 | index XXXXXXX..XXXXXXX 100644 | 65 | index XXXXXXX..XXXXXXX 100644 |
32 | --- a/hw/block/dataplane/virtio-blk.c | 66 | --- a/include/block/block-io.h |
33 | +++ b/hw/block/dataplane/virtio-blk.c | 67 | +++ b/include/block/block-io.h |
34 | @@ -XXX,XX +XXX,XX @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) | 68 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn GRAPH_RDLOCK bdrv_co_flush(BlockDriverState *bs); |
35 | g_free(s); | 69 | int coroutine_fn GRAPH_RDLOCK bdrv_co_pdiscard(BdrvChild *child, int64_t offset, |
70 | int64_t bytes); | ||
71 | |||
72 | +/* Report zone information of zone block device. */ | ||
73 | +int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_report(BlockDriverState *bs, | ||
74 | + int64_t offset, | ||
75 | + unsigned int *nr_zones, | ||
76 | + BlockZoneDescriptor *zones); | ||
77 | +int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_mgmt(BlockDriverState *bs, | ||
78 | + BlockZoneOp op, | ||
79 | + int64_t offset, int64_t len); | ||
80 | + | ||
81 | bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); | ||
82 | int bdrv_block_status(BlockDriverState *bs, int64_t offset, | ||
83 | int64_t bytes, int64_t *pnum, int64_t *map, | ||
84 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h | ||
85 | index XXXXXXX..XXXXXXX 100644 | ||
86 | --- a/include/block/block_int-common.h | ||
87 | +++ b/include/block/block_int-common.h | ||
88 | @@ -XXX,XX +XXX,XX @@ struct BlockDriver { | ||
89 | int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_load_vmstate)( | ||
90 | BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos); | ||
91 | |||
92 | + int coroutine_fn (*bdrv_co_zone_report)(BlockDriverState *bs, | ||
93 | + int64_t offset, unsigned int *nr_zones, | ||
94 | + BlockZoneDescriptor *zones); | ||
95 | + int coroutine_fn (*bdrv_co_zone_mgmt)(BlockDriverState *bs, BlockZoneOp op, | ||
96 | + int64_t offset, int64_t len); | ||
97 | + | ||
98 | /* removable device specific */ | ||
99 | bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_is_inserted)( | ||
100 | BlockDriverState *bs); | ||
101 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockLimits { | ||
102 | |||
103 | /* device zone model */ | ||
104 | BlockZoneModel zoned; | ||
105 | + | ||
106 | + /* zone size expressed in bytes */ | ||
107 | + uint32_t zone_size; | ||
108 | + | ||
109 | + /* total number of zones */ | ||
110 | + uint32_t nr_zones; | ||
111 | + | ||
112 | + /* maximum sectors of a zone append write operation */ | ||
113 | + int64_t max_append_sectors; | ||
114 | + | ||
115 | + /* maximum number of open zones */ | ||
116 | + int64_t max_open_zones; | ||
117 | + | ||
118 | + /* maximum number of active zones */ | ||
119 | + int64_t max_active_zones; | ||
120 | } BlockLimits; | ||
121 | |||
122 | typedef struct BdrvOpBlocker BdrvOpBlocker; | ||
123 | diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h | ||
124 | index XXXXXXX..XXXXXXX 100644 | ||
125 | --- a/include/block/raw-aio.h | ||
126 | +++ b/include/block/raw-aio.h | ||
127 | @@ -XXX,XX +XXX,XX @@ | ||
128 | #define QEMU_AIO_WRITE_ZEROES 0x0020 | ||
129 | #define QEMU_AIO_COPY_RANGE 0x0040 | ||
130 | #define QEMU_AIO_TRUNCATE 0x0080 | ||
131 | +#define QEMU_AIO_ZONE_REPORT 0x0100 | ||
132 | +#define QEMU_AIO_ZONE_MGMT 0x0200 | ||
133 | #define QEMU_AIO_TYPE_MASK \ | ||
134 | (QEMU_AIO_READ | \ | ||
135 | QEMU_AIO_WRITE | \ | ||
136 | @@ -XXX,XX +XXX,XX @@ | ||
137 | QEMU_AIO_DISCARD | \ | ||
138 | QEMU_AIO_WRITE_ZEROES | \ | ||
139 | QEMU_AIO_COPY_RANGE | \ | ||
140 | - QEMU_AIO_TRUNCATE) | ||
141 | + QEMU_AIO_TRUNCATE | \ | ||
142 | + QEMU_AIO_ZONE_REPORT | \ | ||
143 | + QEMU_AIO_ZONE_MGMT) | ||
144 | |||
145 | /* AIO flags */ | ||
146 | #define QEMU_AIO_MISALIGNED 0x1000 | ||
147 | diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h | ||
148 | index XXXXXXX..XXXXXXX 100644 | ||
149 | --- a/include/sysemu/block-backend-io.h | ||
150 | +++ b/include/sysemu/block-backend-io.h | ||
151 | @@ -XXX,XX +XXX,XX @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, | ||
152 | BlockCompletionFunc *cb, void *opaque); | ||
153 | BlockAIOCB *blk_aio_flush(BlockBackend *blk, | ||
154 | BlockCompletionFunc *cb, void *opaque); | ||
155 | +BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset, | ||
156 | + unsigned int *nr_zones, | ||
157 | + BlockZoneDescriptor *zones, | ||
158 | + BlockCompletionFunc *cb, void *opaque); | ||
159 | +BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
160 | + int64_t offset, int64_t len, | ||
161 | + BlockCompletionFunc *cb, void *opaque); | ||
162 | BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes, | ||
163 | BlockCompletionFunc *cb, void *opaque); | ||
164 | void blk_aio_cancel_async(BlockAIOCB *acb); | ||
165 | @@ -XXX,XX +XXX,XX @@ int co_wrapper_mixed blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, | ||
166 | int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, | ||
167 | int64_t bytes, BdrvRequestFlags flags); | ||
168 | |||
169 | +int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset, | ||
170 | + unsigned int *nr_zones, | ||
171 | + BlockZoneDescriptor *zones); | ||
172 | +int co_wrapper_mixed blk_zone_report(BlockBackend *blk, int64_t offset, | ||
173 | + unsigned int *nr_zones, | ||
174 | + BlockZoneDescriptor *zones); | ||
175 | +int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
176 | + int64_t offset, int64_t len); | ||
177 | +int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
178 | + int64_t offset, int64_t len); | ||
179 | + | ||
180 | int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset, | ||
181 | int64_t bytes); | ||
182 | int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, | ||
183 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
184 | index XXXXXXX..XXXXXXX 100644 | ||
185 | --- a/block/block-backend.c | ||
186 | +++ b/block/block-backend.c | ||
187 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn blk_co_flush(BlockBackend *blk) | ||
188 | return ret; | ||
36 | } | 189 | } |
37 | 190 | ||
38 | -static void virtio_blk_data_plane_handle_output(VirtIODevice *vdev, | 191 | +static void coroutine_fn blk_aio_zone_report_entry(void *opaque) |
39 | - VirtQueue *vq) | 192 | +{ |
40 | -{ | 193 | + BlkAioEmAIOCB *acb = opaque; |
41 | - VirtIOBlock *s = (VirtIOBlock *)vdev; | 194 | + BlkRwCo *rwco = &acb->rwco; |
42 | - | 195 | + |
43 | - assert(s->dataplane); | 196 | + rwco->ret = blk_co_zone_report(rwco->blk, rwco->offset, |
44 | - assert(s->dataplane_started); | 197 | + (unsigned int*)(uintptr_t)acb->bytes, |
45 | - | 198 | + rwco->iobuf); |
46 | - virtio_blk_handle_vq(s, vq); | 199 | + blk_aio_complete(acb); |
47 | -} | 200 | +} |
48 | - | 201 | + |
49 | /* Context: QEMU global mutex held */ | 202 | +BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset, |
50 | int virtio_blk_data_plane_start(VirtIODevice *vdev) | 203 | + unsigned int *nr_zones, |
204 | + BlockZoneDescriptor *zones, | ||
205 | + BlockCompletionFunc *cb, void *opaque) | ||
206 | +{ | ||
207 | + BlkAioEmAIOCB *acb; | ||
208 | + Coroutine *co; | ||
209 | + IO_CODE(); | ||
210 | + | ||
211 | + blk_inc_in_flight(blk); | ||
212 | + acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); | ||
213 | + acb->rwco = (BlkRwCo) { | ||
214 | + .blk = blk, | ||
215 | + .offset = offset, | ||
216 | + .iobuf = zones, | ||
217 | + .ret = NOT_DONE, | ||
218 | + }; | ||
219 | + acb->bytes = (int64_t)(uintptr_t)nr_zones, | ||
220 | + acb->has_returned = false; | ||
221 | + | ||
222 | + co = qemu_coroutine_create(blk_aio_zone_report_entry, acb); | ||
223 | + aio_co_enter(blk_get_aio_context(blk), co); | ||
224 | + | ||
225 | + acb->has_returned = true; | ||
226 | + if (acb->rwco.ret != NOT_DONE) { | ||
227 | + replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), | ||
228 | + blk_aio_complete_bh, acb); | ||
229 | + } | ||
230 | + | ||
231 | + return &acb->common; | ||
232 | +} | ||
233 | + | ||
234 | +static void coroutine_fn blk_aio_zone_mgmt_entry(void *opaque) | ||
235 | +{ | ||
236 | + BlkAioEmAIOCB *acb = opaque; | ||
237 | + BlkRwCo *rwco = &acb->rwco; | ||
238 | + | ||
239 | + rwco->ret = blk_co_zone_mgmt(rwco->blk, | ||
240 | + (BlockZoneOp)(uintptr_t)rwco->iobuf, | ||
241 | + rwco->offset, acb->bytes); | ||
242 | + blk_aio_complete(acb); | ||
243 | +} | ||
244 | + | ||
245 | +BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
246 | + int64_t offset, int64_t len, | ||
247 | + BlockCompletionFunc *cb, void *opaque) { | ||
248 | + BlkAioEmAIOCB *acb; | ||
249 | + Coroutine *co; | ||
250 | + IO_CODE(); | ||
251 | + | ||
252 | + blk_inc_in_flight(blk); | ||
253 | + acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); | ||
254 | + acb->rwco = (BlkRwCo) { | ||
255 | + .blk = blk, | ||
256 | + .offset = offset, | ||
257 | + .iobuf = (void *)(uintptr_t)op, | ||
258 | + .ret = NOT_DONE, | ||
259 | + }; | ||
260 | + acb->bytes = len; | ||
261 | + acb->has_returned = false; | ||
262 | + | ||
263 | + co = qemu_coroutine_create(blk_aio_zone_mgmt_entry, acb); | ||
264 | + aio_co_enter(blk_get_aio_context(blk), co); | ||
265 | + | ||
266 | + acb->has_returned = true; | ||
267 | + if (acb->rwco.ret != NOT_DONE) { | ||
268 | + replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), | ||
269 | + blk_aio_complete_bh, acb); | ||
270 | + } | ||
271 | + | ||
272 | + return &acb->common; | ||
273 | +} | ||
274 | + | ||
275 | +/* | ||
276 | + * Send a zone_report command. | ||
277 | + * offset is a byte offset from the start of the device. No alignment | ||
278 | + * required for offset. | ||
279 | + * nr_zones represents IN maximum and OUT actual. | ||
280 | + */ | ||
281 | +int coroutine_fn blk_co_zone_report(BlockBackend *blk, int64_t offset, | ||
282 | + unsigned int *nr_zones, | ||
283 | + BlockZoneDescriptor *zones) | ||
284 | +{ | ||
285 | + int ret; | ||
286 | + IO_CODE(); | ||
287 | + | ||
288 | + blk_inc_in_flight(blk); /* increase before waiting */ | ||
289 | + blk_wait_while_drained(blk); | ||
290 | + GRAPH_RDLOCK_GUARD(); | ||
291 | + if (!blk_is_available(blk)) { | ||
292 | + blk_dec_in_flight(blk); | ||
293 | + return -ENOMEDIUM; | ||
294 | + } | ||
295 | + ret = bdrv_co_zone_report(blk_bs(blk), offset, nr_zones, zones); | ||
296 | + blk_dec_in_flight(blk); | ||
297 | + return ret; | ||
298 | +} | ||
299 | + | ||
300 | +/* | ||
301 | + * Send a zone_management command. | ||
302 | + * op is the zone operation; | ||
303 | + * offset is the byte offset from the start of the zoned device; | ||
304 | + * len is the maximum number of bytes the command should operate on. It | ||
305 | + * should be aligned with the device zone size. | ||
306 | + */ | ||
307 | +int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
308 | + int64_t offset, int64_t len) | ||
309 | +{ | ||
310 | + int ret; | ||
311 | + IO_CODE(); | ||
312 | + | ||
313 | + blk_inc_in_flight(blk); | ||
314 | + blk_wait_while_drained(blk); | ||
315 | + GRAPH_RDLOCK_GUARD(); | ||
316 | + | ||
317 | + ret = blk_check_byte_request(blk, offset, len); | ||
318 | + if (ret < 0) { | ||
319 | + blk_dec_in_flight(blk); | ||
320 | + return ret; | ||
321 | + } | ||
322 | + | ||
323 | + ret = bdrv_co_zone_mgmt(blk_bs(blk), op, offset, len); | ||
324 | + blk_dec_in_flight(blk); | ||
325 | + return ret; | ||
326 | +} | ||
327 | + | ||
328 | void blk_drain(BlockBackend *blk) | ||
51 | { | 329 | { |
52 | @@ -XXX,XX +XXX,XX @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) | 330 | BlockDriverState *bs = blk_bs(blk); |
53 | for (i = 0; i < nvqs; i++) { | 331 | diff --git a/block/file-posix.c b/block/file-posix.c |
54 | VirtQueue *vq = virtio_get_queue(s->vdev, i); | 332 | index XXXXXXX..XXXXXXX 100644 |
55 | 333 | --- a/block/file-posix.c | |
56 | - virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, | 334 | +++ b/block/file-posix.c |
57 | - virtio_blk_data_plane_handle_output); | 335 | @@ -XXX,XX +XXX,XX @@ |
58 | + virtio_queue_aio_attach_host_notifier(vq, s->ctx); | 336 | #include <sys/param.h> |
337 | #include <sys/syscall.h> | ||
338 | #include <sys/vfs.h> | ||
339 | +#if defined(CONFIG_BLKZONED) | ||
340 | +#include <linux/blkzoned.h> | ||
341 | +#endif | ||
342 | #include <linux/cdrom.h> | ||
343 | #include <linux/fd.h> | ||
344 | #include <linux/fs.h> | ||
345 | @@ -XXX,XX +XXX,XX @@ typedef struct RawPosixAIOData { | ||
346 | PreallocMode prealloc; | ||
347 | Error **errp; | ||
348 | } truncate; | ||
349 | + struct { | ||
350 | + unsigned int *nr_zones; | ||
351 | + BlockZoneDescriptor *zones; | ||
352 | + } zone_report; | ||
353 | + struct { | ||
354 | + unsigned long op; | ||
355 | + } zone_mgmt; | ||
356 | }; | ||
357 | } RawPosixAIOData; | ||
358 | |||
359 | @@ -XXX,XX +XXX,XX @@ static int get_sysfs_str_val(struct stat *st, const char *attribute, | ||
360 | #endif | ||
361 | } | ||
362 | |||
363 | +#if defined(CONFIG_BLKZONED) | ||
364 | static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned) | ||
365 | { | ||
366 | g_autofree char *val = NULL; | ||
367 | @@ -XXX,XX +XXX,XX @@ static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned) | ||
59 | } | 368 | } |
60 | aio_context_release(s->ctx); | ||
61 | return 0; | 369 | return 0; |
62 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_data_plane_stop_bh(void *opaque) | 370 | } |
63 | for (i = 0; i < s->conf->num_queues; i++) { | 371 | +#endif /* defined(CONFIG_BLKZONED) */ |
64 | VirtQueue *vq = virtio_get_queue(s->vdev, i); | 372 | |
65 | 373 | /* | |
66 | - virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL); | 374 | * Get a sysfs attribute value as a long integer. |
67 | + virtio_queue_aio_detach_host_notifier(vq, s->ctx); | 375 | @@ -XXX,XX +XXX,XX @@ static int hdev_get_max_segments(int fd, struct stat *st) |
376 | #endif | ||
377 | } | ||
378 | |||
379 | +#if defined(CONFIG_BLKZONED) | ||
380 | static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, | ||
381 | Error **errp) | ||
382 | { | ||
383 | @@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, | ||
384 | return; | ||
385 | } | ||
386 | bs->bl.zoned = zoned; | ||
387 | + | ||
388 | + ret = get_sysfs_long_val(st, "max_open_zones"); | ||
389 | + if (ret >= 0) { | ||
390 | + bs->bl.max_open_zones = ret; | ||
391 | + } | ||
392 | + | ||
393 | + ret = get_sysfs_long_val(st, "max_active_zones"); | ||
394 | + if (ret >= 0) { | ||
395 | + bs->bl.max_active_zones = ret; | ||
396 | + } | ||
397 | + | ||
398 | + /* | ||
399 | + * The zoned device must at least have zone size and nr_zones fields. | ||
400 | + */ | ||
401 | + ret = get_sysfs_long_val(st, "chunk_sectors"); | ||
402 | + if (ret < 0) { | ||
403 | + error_setg_errno(errp, -ret, "Unable to read chunk_sectors " | ||
404 | + "sysfs attribute"); | ||
405 | + return; | ||
406 | + } else if (!ret) { | ||
407 | + error_setg(errp, "Read 0 from chunk_sectors sysfs attribute"); | ||
408 | + return; | ||
409 | + } | ||
410 | + bs->bl.zone_size = ret << BDRV_SECTOR_BITS; | ||
411 | + | ||
412 | + ret = get_sysfs_long_val(st, "nr_zones"); | ||
413 | + if (ret < 0) { | ||
414 | + error_setg_errno(errp, -ret, "Unable to read nr_zones " | ||
415 | + "sysfs attribute"); | ||
416 | + return; | ||
417 | + } else if (!ret) { | ||
418 | + error_setg(errp, "Read 0 from nr_zones sysfs attribute"); | ||
419 | + return; | ||
420 | + } | ||
421 | + bs->bl.nr_zones = ret; | ||
422 | + | ||
423 | + ret = get_sysfs_long_val(st, "zone_append_max_bytes"); | ||
424 | + if (ret > 0) { | ||
425 | + bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS; | ||
426 | + } | ||
427 | } | ||
428 | +#else /* !defined(CONFIG_BLKZONED) */ | ||
429 | +static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, | ||
430 | + Error **errp) | ||
431 | +{ | ||
432 | + bs->bl.zoned = BLK_Z_NONE; | ||
433 | +} | ||
434 | +#endif /* !defined(CONFIG_BLKZONED) */ | ||
435 | |||
436 | static void raw_refresh_limits(BlockDriverState *bs, Error **errp) | ||
437 | { | ||
438 | @@ -XXX,XX +XXX,XX @@ static int hdev_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz) | ||
439 | BDRVRawState *s = bs->opaque; | ||
440 | int ret; | ||
441 | |||
442 | - /* If DASD, get blocksizes */ | ||
443 | + /* If DASD or zoned devices, get blocksizes */ | ||
444 | if (check_for_dasd(s->fd) < 0) { | ||
445 | - return -ENOTSUP; | ||
446 | + /* zoned devices are not DASD */ | ||
447 | + if (bs->bl.zoned == BLK_Z_NONE) { | ||
448 | + return -ENOTSUP; | ||
449 | + } | ||
450 | } | ||
451 | ret = probe_logical_blocksize(s->fd, &bsz->log); | ||
452 | if (ret < 0) { | ||
453 | @@ -XXX,XX +XXX,XX @@ static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd, | ||
454 | } | ||
455 | #endif | ||
456 | |||
457 | +/* | ||
458 | + * parse_zone - Fill a zone descriptor | ||
459 | + */ | ||
460 | +#if defined(CONFIG_BLKZONED) | ||
461 | +static inline int parse_zone(struct BlockZoneDescriptor *zone, | ||
462 | + const struct blk_zone *blkz) { | ||
463 | + zone->start = blkz->start << BDRV_SECTOR_BITS; | ||
464 | + zone->length = blkz->len << BDRV_SECTOR_BITS; | ||
465 | + zone->wp = blkz->wp << BDRV_SECTOR_BITS; | ||
466 | + | ||
467 | +#ifdef HAVE_BLK_ZONE_REP_CAPACITY | ||
468 | + zone->cap = blkz->capacity << BDRV_SECTOR_BITS; | ||
469 | +#else | ||
470 | + zone->cap = blkz->len << BDRV_SECTOR_BITS; | ||
471 | +#endif | ||
472 | + | ||
473 | + switch (blkz->type) { | ||
474 | + case BLK_ZONE_TYPE_SEQWRITE_REQ: | ||
475 | + zone->type = BLK_ZT_SWR; | ||
476 | + break; | ||
477 | + case BLK_ZONE_TYPE_SEQWRITE_PREF: | ||
478 | + zone->type = BLK_ZT_SWP; | ||
479 | + break; | ||
480 | + case BLK_ZONE_TYPE_CONVENTIONAL: | ||
481 | + zone->type = BLK_ZT_CONV; | ||
482 | + break; | ||
483 | + default: | ||
484 | + error_report("Unsupported zone type: 0x%x", blkz->type); | ||
485 | + return -ENOTSUP; | ||
486 | + } | ||
487 | + | ||
488 | + switch (blkz->cond) { | ||
489 | + case BLK_ZONE_COND_NOT_WP: | ||
490 | + zone->state = BLK_ZS_NOT_WP; | ||
491 | + break; | ||
492 | + case BLK_ZONE_COND_EMPTY: | ||
493 | + zone->state = BLK_ZS_EMPTY; | ||
494 | + break; | ||
495 | + case BLK_ZONE_COND_IMP_OPEN: | ||
496 | + zone->state = BLK_ZS_IOPEN; | ||
497 | + break; | ||
498 | + case BLK_ZONE_COND_EXP_OPEN: | ||
499 | + zone->state = BLK_ZS_EOPEN; | ||
500 | + break; | ||
501 | + case BLK_ZONE_COND_CLOSED: | ||
502 | + zone->state = BLK_ZS_CLOSED; | ||
503 | + break; | ||
504 | + case BLK_ZONE_COND_READONLY: | ||
505 | + zone->state = BLK_ZS_RDONLY; | ||
506 | + break; | ||
507 | + case BLK_ZONE_COND_FULL: | ||
508 | + zone->state = BLK_ZS_FULL; | ||
509 | + break; | ||
510 | + case BLK_ZONE_COND_OFFLINE: | ||
511 | + zone->state = BLK_ZS_OFFLINE; | ||
512 | + break; | ||
513 | + default: | ||
514 | + error_report("Unsupported zone state: 0x%x", blkz->cond); | ||
515 | + return -ENOTSUP; | ||
516 | + } | ||
517 | + return 0; | ||
518 | +} | ||
519 | +#endif | ||
520 | + | ||
521 | +#if defined(CONFIG_BLKZONED) | ||
522 | +static int handle_aiocb_zone_report(void *opaque) | ||
523 | +{ | ||
524 | + RawPosixAIOData *aiocb = opaque; | ||
525 | + int fd = aiocb->aio_fildes; | ||
526 | + unsigned int *nr_zones = aiocb->zone_report.nr_zones; | ||
527 | + BlockZoneDescriptor *zones = aiocb->zone_report.zones; | ||
528 | + /* zoned block devices use 512-byte sectors */ | ||
529 | + uint64_t sector = aiocb->aio_offset / 512; | ||
530 | + | ||
531 | + struct blk_zone *blkz; | ||
532 | + size_t rep_size; | ||
533 | + unsigned int nrz; | ||
534 | + int ret; | ||
535 | + unsigned int n = 0, i = 0; | ||
536 | + | ||
537 | + nrz = *nr_zones; | ||
538 | + rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone); | ||
539 | + g_autofree struct blk_zone_report *rep = NULL; | ||
540 | + rep = g_malloc(rep_size); | ||
541 | + | ||
542 | + blkz = (struct blk_zone *)(rep + 1); | ||
543 | + while (n < nrz) { | ||
544 | + memset(rep, 0, rep_size); | ||
545 | + rep->sector = sector; | ||
546 | + rep->nr_zones = nrz - n; | ||
547 | + | ||
548 | + do { | ||
549 | + ret = ioctl(fd, BLKREPORTZONE, rep); | ||
550 | + } while (ret != 0 && errno == EINTR); | ||
551 | + if (ret != 0) { | ||
552 | + error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d", | ||
553 | + fd, sector, errno); | ||
554 | + return -errno; | ||
555 | + } | ||
556 | + | ||
557 | + if (!rep->nr_zones) { | ||
558 | + break; | ||
559 | + } | ||
560 | + | ||
561 | + for (i = 0; i < rep->nr_zones; i++, n++) { | ||
562 | + ret = parse_zone(&zones[n], &blkz[i]); | ||
563 | + if (ret != 0) { | ||
564 | + return ret; | ||
565 | + } | ||
566 | + | ||
567 | + /* The next report should start after the last zone reported */ | ||
568 | + sector = blkz[i].start + blkz[i].len; | ||
569 | + } | ||
570 | + } | ||
571 | + | ||
572 | + *nr_zones = n; | ||
573 | + return 0; | ||
574 | +} | ||
575 | +#endif | ||
576 | + | ||
577 | +#if defined(CONFIG_BLKZONED) | ||
578 | +static int handle_aiocb_zone_mgmt(void *opaque) | ||
579 | +{ | ||
580 | + RawPosixAIOData *aiocb = opaque; | ||
581 | + int fd = aiocb->aio_fildes; | ||
582 | + uint64_t sector = aiocb->aio_offset / 512; | ||
583 | + int64_t nr_sectors = aiocb->aio_nbytes / 512; | ||
584 | + struct blk_zone_range range; | ||
585 | + int ret; | ||
586 | + | ||
587 | + /* Execute the operation */ | ||
588 | + range.sector = sector; | ||
589 | + range.nr_sectors = nr_sectors; | ||
590 | + do { | ||
591 | + ret = ioctl(fd, aiocb->zone_mgmt.op, &range); | ||
592 | + } while (ret != 0 && errno == EINTR); | ||
593 | + | ||
594 | + return ret; | ||
595 | +} | ||
596 | +#endif | ||
597 | + | ||
598 | static int handle_aiocb_copy_range(void *opaque) | ||
599 | { | ||
600 | RawPosixAIOData *aiocb = opaque; | ||
601 | @@ -XXX,XX +XXX,XX @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret) | ||
68 | } | 602 | } |
69 | } | 603 | } |
70 | 604 | ||
71 | diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c | 605 | +/* |
606 | + * zone report - Get a zone block device's information in the form | ||
607 | + * of an array of zone descriptors. | ||
608 | + * zones is an array of zone descriptors to hold zone information on reply; | ||
609 | + * offset can be any byte within the entire size of the device; | ||
610 | + * nr_zones is the maxium number of sectors the command should operate on. | ||
611 | + */ | ||
612 | +#if defined(CONFIG_BLKZONED) | ||
613 | +static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset, | ||
614 | + unsigned int *nr_zones, | ||
615 | + BlockZoneDescriptor *zones) { | ||
616 | + BDRVRawState *s = bs->opaque; | ||
617 | + RawPosixAIOData acb = (RawPosixAIOData) { | ||
618 | + .bs = bs, | ||
619 | + .aio_fildes = s->fd, | ||
620 | + .aio_type = QEMU_AIO_ZONE_REPORT, | ||
621 | + .aio_offset = offset, | ||
622 | + .zone_report = { | ||
623 | + .nr_zones = nr_zones, | ||
624 | + .zones = zones, | ||
625 | + }, | ||
626 | + }; | ||
627 | + | ||
628 | + return raw_thread_pool_submit(handle_aiocb_zone_report, &acb); | ||
629 | +} | ||
630 | +#endif | ||
631 | + | ||
632 | +/* | ||
633 | + * zone management operations - Execute an operation on a zone | ||
634 | + */ | ||
635 | +#if defined(CONFIG_BLKZONED) | ||
636 | +static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
637 | + int64_t offset, int64_t len) { | ||
638 | + BDRVRawState *s = bs->opaque; | ||
639 | + RawPosixAIOData acb; | ||
640 | + int64_t zone_size, zone_size_mask; | ||
641 | + const char *op_name; | ||
642 | + unsigned long zo; | ||
643 | + int ret; | ||
644 | + int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS; | ||
645 | + | ||
646 | + zone_size = bs->bl.zone_size; | ||
647 | + zone_size_mask = zone_size - 1; | ||
648 | + if (offset & zone_size_mask) { | ||
649 | + error_report("sector offset %" PRId64 " is not aligned to zone size " | ||
650 | + "%" PRId64 "", offset / 512, zone_size / 512); | ||
651 | + return -EINVAL; | ||
652 | + } | ||
653 | + | ||
654 | + if (((offset + len) < capacity && len & zone_size_mask) || | ||
655 | + offset + len > capacity) { | ||
656 | + error_report("number of sectors %" PRId64 " is not aligned to zone size" | ||
657 | + " %" PRId64 "", len / 512, zone_size / 512); | ||
658 | + return -EINVAL; | ||
659 | + } | ||
660 | + | ||
661 | + switch (op) { | ||
662 | + case BLK_ZO_OPEN: | ||
663 | + op_name = "BLKOPENZONE"; | ||
664 | + zo = BLKOPENZONE; | ||
665 | + break; | ||
666 | + case BLK_ZO_CLOSE: | ||
667 | + op_name = "BLKCLOSEZONE"; | ||
668 | + zo = BLKCLOSEZONE; | ||
669 | + break; | ||
670 | + case BLK_ZO_FINISH: | ||
671 | + op_name = "BLKFINISHZONE"; | ||
672 | + zo = BLKFINISHZONE; | ||
673 | + break; | ||
674 | + case BLK_ZO_RESET: | ||
675 | + op_name = "BLKRESETZONE"; | ||
676 | + zo = BLKRESETZONE; | ||
677 | + break; | ||
678 | + default: | ||
679 | + error_report("Unsupported zone op: 0x%x", op); | ||
680 | + return -ENOTSUP; | ||
681 | + } | ||
682 | + | ||
683 | + acb = (RawPosixAIOData) { | ||
684 | + .bs = bs, | ||
685 | + .aio_fildes = s->fd, | ||
686 | + .aio_type = QEMU_AIO_ZONE_MGMT, | ||
687 | + .aio_offset = offset, | ||
688 | + .aio_nbytes = len, | ||
689 | + .zone_mgmt = { | ||
690 | + .op = zo, | ||
691 | + }, | ||
692 | + }; | ||
693 | + | ||
694 | + ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb); | ||
695 | + if (ret != 0) { | ||
696 | + error_report("ioctl %s failed %d", op_name, ret); | ||
697 | + } | ||
698 | + | ||
699 | + return ret; | ||
700 | +} | ||
701 | +#endif | ||
702 | + | ||
703 | static coroutine_fn int | ||
704 | raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
705 | bool blkdev) | ||
706 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_host_device = { | ||
707 | #ifdef __linux__ | ||
708 | .bdrv_co_ioctl = hdev_co_ioctl, | ||
709 | #endif | ||
710 | + | ||
711 | + /* zoned device */ | ||
712 | +#if defined(CONFIG_BLKZONED) | ||
713 | + /* zone management operations */ | ||
714 | + .bdrv_co_zone_report = raw_co_zone_report, | ||
715 | + .bdrv_co_zone_mgmt = raw_co_zone_mgmt, | ||
716 | +#endif | ||
717 | }; | ||
718 | |||
719 | #if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) | ||
720 | diff --git a/block/io.c b/block/io.c | ||
72 | index XXXXXXX..XXXXXXX 100644 | 721 | index XXXXXXX..XXXXXXX 100644 |
73 | --- a/hw/scsi/virtio-scsi-dataplane.c | 722 | --- a/block/io.c |
74 | +++ b/hw/scsi/virtio-scsi-dataplane.c | 723 | +++ b/block/io.c |
75 | @@ -XXX,XX +XXX,XX @@ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) | 724 | @@ -XXX,XX +XXX,XX @@ out: |
76 | } | 725 | return co.ret; |
77 | } | 726 | } |
78 | 727 | ||
79 | -static void virtio_scsi_data_plane_handle_cmd(VirtIODevice *vdev, | 728 | +int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset, |
80 | - VirtQueue *vq) | 729 | + unsigned int *nr_zones, |
81 | -{ | 730 | + BlockZoneDescriptor *zones) |
82 | - VirtIOSCSI *s = VIRTIO_SCSI(vdev); | 731 | +{ |
83 | - | 732 | + BlockDriver *drv = bs->drv; |
84 | - virtio_scsi_acquire(s); | 733 | + CoroutineIOCompletion co = { |
85 | - if (!s->dataplane_fenced) { | 734 | + .coroutine = qemu_coroutine_self(), |
86 | - assert(s->ctx && s->dataplane_started); | 735 | + }; |
87 | - virtio_scsi_handle_cmd_vq(s, vq); | 736 | + IO_CODE(); |
88 | - } | 737 | + |
89 | - virtio_scsi_release(s); | 738 | + bdrv_inc_in_flight(bs); |
90 | -} | 739 | + if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) { |
91 | - | 740 | + co.ret = -ENOTSUP; |
92 | -static void virtio_scsi_data_plane_handle_ctrl(VirtIODevice *vdev, | 741 | + goto out; |
93 | - VirtQueue *vq) | 742 | + } |
94 | -{ | 743 | + co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones); |
95 | - VirtIOSCSI *s = VIRTIO_SCSI(vdev); | 744 | +out: |
96 | - | 745 | + bdrv_dec_in_flight(bs); |
97 | - virtio_scsi_acquire(s); | 746 | + return co.ret; |
98 | - if (!s->dataplane_fenced) { | 747 | +} |
99 | - assert(s->ctx && s->dataplane_started); | 748 | + |
100 | - virtio_scsi_handle_ctrl_vq(s, vq); | 749 | +int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, |
101 | - } | 750 | + int64_t offset, int64_t len) |
102 | - virtio_scsi_release(s); | 751 | +{ |
103 | -} | 752 | + BlockDriver *drv = bs->drv; |
104 | - | 753 | + CoroutineIOCompletion co = { |
105 | -static void virtio_scsi_data_plane_handle_event(VirtIODevice *vdev, | 754 | + .coroutine = qemu_coroutine_self(), |
106 | - VirtQueue *vq) | 755 | + }; |
107 | -{ | 756 | + IO_CODE(); |
108 | - VirtIOSCSI *s = VIRTIO_SCSI(vdev); | 757 | + |
109 | - | 758 | + bdrv_inc_in_flight(bs); |
110 | - virtio_scsi_acquire(s); | 759 | + if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) { |
111 | - if (!s->dataplane_fenced) { | 760 | + co.ret = -ENOTSUP; |
112 | - assert(s->ctx && s->dataplane_started); | 761 | + goto out; |
113 | - virtio_scsi_handle_event_vq(s, vq); | 762 | + } |
114 | - } | 763 | + co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len); |
115 | - virtio_scsi_release(s); | 764 | +out: |
116 | -} | 765 | + bdrv_dec_in_flight(bs); |
117 | - | 766 | + return co.ret; |
118 | static int virtio_scsi_set_host_notifier(VirtIOSCSI *s, VirtQueue *vq, int n) | 767 | +} |
768 | + | ||
769 | void *qemu_blockalign(BlockDriverState *bs, size_t size) | ||
119 | { | 770 | { |
120 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); | 771 | IO_CODE(); |
121 | @@ -XXX,XX +XXX,XX @@ static void virtio_scsi_dataplane_stop_bh(void *opaque) | 772 | diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c |
122 | VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); | ||
123 | int i; | ||
124 | |||
125 | - virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, NULL); | ||
126 | - virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, NULL); | ||
127 | + virtio_queue_aio_detach_host_notifier(vs->ctrl_vq, s->ctx); | ||
128 | + virtio_queue_aio_detach_host_notifier(vs->event_vq, s->ctx); | ||
129 | for (i = 0; i < vs->conf.num_queues; i++) { | ||
130 | - virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, NULL); | ||
131 | + virtio_queue_aio_detach_host_notifier(vs->cmd_vqs[i], s->ctx); | ||
132 | } | ||
133 | } | ||
134 | |||
135 | @@ -XXX,XX +XXX,XX @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) | ||
136 | memory_region_transaction_commit(); | ||
137 | |||
138 | aio_context_acquire(s->ctx); | ||
139 | - virtio_queue_aio_set_host_notifier_handler(vs->ctrl_vq, s->ctx, | ||
140 | - virtio_scsi_data_plane_handle_ctrl); | ||
141 | - virtio_queue_aio_set_host_notifier_handler(vs->event_vq, s->ctx, | ||
142 | - virtio_scsi_data_plane_handle_event); | ||
143 | + virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); | ||
144 | + virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx); | ||
145 | |||
146 | for (i = 0; i < vs->conf.num_queues; i++) { | ||
147 | - virtio_queue_aio_set_host_notifier_handler(vs->cmd_vqs[i], s->ctx, | ||
148 | - virtio_scsi_data_plane_handle_cmd); | ||
149 | + virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); | ||
150 | } | ||
151 | |||
152 | s->dataplane_starting = false; | ||
153 | diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c | ||
154 | index XXXXXXX..XXXXXXX 100644 | 773 | index XXXXXXX..XXXXXXX 100644 |
155 | --- a/hw/virtio/virtio.c | 774 | --- a/qemu-io-cmds.c |
156 | +++ b/hw/virtio/virtio.c | 775 | +++ b/qemu-io-cmds.c |
157 | @@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) | 776 | @@ -XXX,XX +XXX,XX @@ static const cmdinfo_t flush_cmd = { |
158 | virtio_queue_set_notification(vq, 1); | 777 | .oneline = "flush all in-core file state to disk", |
159 | } | 778 | }; |
160 | 779 | ||
161 | -void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | 780 | +static inline int64_t tosector(int64_t bytes) |
162 | - VirtIOHandleOutput handle_output) | 781 | +{ |
163 | +void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx) | 782 | + return bytes >> BDRV_SECTOR_BITS; |
164 | { | 783 | +} |
165 | - if (handle_output) { | 784 | + |
166 | - aio_set_event_notifier(ctx, &vq->host_notifier, true, | 785 | +static int zone_report_f(BlockBackend *blk, int argc, char **argv) |
167 | - virtio_queue_host_notifier_read, | 786 | +{ |
168 | - virtio_queue_host_notifier_aio_poll, | 787 | + int ret; |
169 | - virtio_queue_host_notifier_aio_poll_ready); | 788 | + int64_t offset; |
170 | - aio_set_event_notifier_poll(ctx, &vq->host_notifier, | 789 | + unsigned int nr_zones; |
171 | - virtio_queue_host_notifier_aio_poll_begin, | 790 | + |
172 | - virtio_queue_host_notifier_aio_poll_end); | 791 | + ++optind; |
173 | - } else { | 792 | + offset = cvtnum(argv[optind]); |
174 | - aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); | 793 | + ++optind; |
175 | - /* Test and clear notifier before after disabling event, | 794 | + nr_zones = cvtnum(argv[optind]); |
176 | - * in case poll callback didn't have time to run. */ | 795 | + |
177 | - virtio_queue_host_notifier_read(&vq->host_notifier); | 796 | + g_autofree BlockZoneDescriptor *zones = NULL; |
178 | - } | 797 | + zones = g_new(BlockZoneDescriptor, nr_zones); |
179 | + aio_set_event_notifier(ctx, &vq->host_notifier, true, | 798 | + ret = blk_zone_report(blk, offset, &nr_zones, zones); |
180 | + virtio_queue_host_notifier_read, | 799 | + if (ret < 0) { |
181 | + virtio_queue_host_notifier_aio_poll, | 800 | + printf("zone report failed: %s\n", strerror(-ret)); |
182 | + virtio_queue_host_notifier_aio_poll_ready); | 801 | + } else { |
183 | + aio_set_event_notifier_poll(ctx, &vq->host_notifier, | 802 | + for (int i = 0; i < nr_zones; ++i) { |
184 | + virtio_queue_host_notifier_aio_poll_begin, | 803 | + printf("start: 0x%" PRIx64 ", len 0x%" PRIx64 ", " |
185 | + virtio_queue_host_notifier_aio_poll_end); | 804 | + "cap"" 0x%" PRIx64 ", wptr 0x%" PRIx64 ", " |
186 | +} | 805 | + "zcond:%u, [type: %u]\n", |
187 | + | 806 | + tosector(zones[i].start), tosector(zones[i].length), |
188 | +void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx) | 807 | + tosector(zones[i].cap), tosector(zones[i].wp), |
189 | +{ | 808 | + zones[i].state, zones[i].type); |
190 | + aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); | 809 | + } |
191 | + /* Test and clear notifier before after disabling event, | 810 | + } |
192 | + * in case poll callback didn't have time to run. */ | 811 | + return ret; |
193 | + virtio_queue_host_notifier_read(&vq->host_notifier); | 812 | +} |
194 | } | 813 | + |
195 | 814 | +static const cmdinfo_t zone_report_cmd = { | |
196 | void virtio_queue_host_notifier_read(EventNotifier *n) | 815 | + .name = "zone_report", |
816 | + .altname = "zrp", | ||
817 | + .cfunc = zone_report_f, | ||
818 | + .argmin = 2, | ||
819 | + .argmax = 2, | ||
820 | + .args = "offset number", | ||
821 | + .oneline = "report zone information", | ||
822 | +}; | ||
823 | + | ||
824 | +static int zone_open_f(BlockBackend *blk, int argc, char **argv) | ||
825 | +{ | ||
826 | + int ret; | ||
827 | + int64_t offset, len; | ||
828 | + ++optind; | ||
829 | + offset = cvtnum(argv[optind]); | ||
830 | + ++optind; | ||
831 | + len = cvtnum(argv[optind]); | ||
832 | + ret = blk_zone_mgmt(blk, BLK_ZO_OPEN, offset, len); | ||
833 | + if (ret < 0) { | ||
834 | + printf("zone open failed: %s\n", strerror(-ret)); | ||
835 | + } | ||
836 | + return ret; | ||
837 | +} | ||
838 | + | ||
839 | +static const cmdinfo_t zone_open_cmd = { | ||
840 | + .name = "zone_open", | ||
841 | + .altname = "zo", | ||
842 | + .cfunc = zone_open_f, | ||
843 | + .argmin = 2, | ||
844 | + .argmax = 2, | ||
845 | + .args = "offset len", | ||
846 | + .oneline = "explicit open a range of zones in zone block device", | ||
847 | +}; | ||
848 | + | ||
849 | +static int zone_close_f(BlockBackend *blk, int argc, char **argv) | ||
850 | +{ | ||
851 | + int ret; | ||
852 | + int64_t offset, len; | ||
853 | + ++optind; | ||
854 | + offset = cvtnum(argv[optind]); | ||
855 | + ++optind; | ||
856 | + len = cvtnum(argv[optind]); | ||
857 | + ret = blk_zone_mgmt(blk, BLK_ZO_CLOSE, offset, len); | ||
858 | + if (ret < 0) { | ||
859 | + printf("zone close failed: %s\n", strerror(-ret)); | ||
860 | + } | ||
861 | + return ret; | ||
862 | +} | ||
863 | + | ||
864 | +static const cmdinfo_t zone_close_cmd = { | ||
865 | + .name = "zone_close", | ||
866 | + .altname = "zc", | ||
867 | + .cfunc = zone_close_f, | ||
868 | + .argmin = 2, | ||
869 | + .argmax = 2, | ||
870 | + .args = "offset len", | ||
871 | + .oneline = "close a range of zones in zone block device", | ||
872 | +}; | ||
873 | + | ||
874 | +static int zone_finish_f(BlockBackend *blk, int argc, char **argv) | ||
875 | +{ | ||
876 | + int ret; | ||
877 | + int64_t offset, len; | ||
878 | + ++optind; | ||
879 | + offset = cvtnum(argv[optind]); | ||
880 | + ++optind; | ||
881 | + len = cvtnum(argv[optind]); | ||
882 | + ret = blk_zone_mgmt(blk, BLK_ZO_FINISH, offset, len); | ||
883 | + if (ret < 0) { | ||
884 | + printf("zone finish failed: %s\n", strerror(-ret)); | ||
885 | + } | ||
886 | + return ret; | ||
887 | +} | ||
888 | + | ||
889 | +static const cmdinfo_t zone_finish_cmd = { | ||
890 | + .name = "zone_finish", | ||
891 | + .altname = "zf", | ||
892 | + .cfunc = zone_finish_f, | ||
893 | + .argmin = 2, | ||
894 | + .argmax = 2, | ||
895 | + .args = "offset len", | ||
896 | + .oneline = "finish a range of zones in zone block device", | ||
897 | +}; | ||
898 | + | ||
899 | +static int zone_reset_f(BlockBackend *blk, int argc, char **argv) | ||
900 | +{ | ||
901 | + int ret; | ||
902 | + int64_t offset, len; | ||
903 | + ++optind; | ||
904 | + offset = cvtnum(argv[optind]); | ||
905 | + ++optind; | ||
906 | + len = cvtnum(argv[optind]); | ||
907 | + ret = blk_zone_mgmt(blk, BLK_ZO_RESET, offset, len); | ||
908 | + if (ret < 0) { | ||
909 | + printf("zone reset failed: %s\n", strerror(-ret)); | ||
910 | + } | ||
911 | + return ret; | ||
912 | +} | ||
913 | + | ||
914 | +static const cmdinfo_t zone_reset_cmd = { | ||
915 | + .name = "zone_reset", | ||
916 | + .altname = "zrs", | ||
917 | + .cfunc = zone_reset_f, | ||
918 | + .argmin = 2, | ||
919 | + .argmax = 2, | ||
920 | + .args = "offset len", | ||
921 | + .oneline = "reset a zone write pointer in zone block device", | ||
922 | +}; | ||
923 | + | ||
924 | static int truncate_f(BlockBackend *blk, int argc, char **argv); | ||
925 | static const cmdinfo_t truncate_cmd = { | ||
926 | .name = "truncate", | ||
927 | @@ -XXX,XX +XXX,XX @@ static void __attribute((constructor)) init_qemuio_commands(void) | ||
928 | qemuio_add_command(&aio_write_cmd); | ||
929 | qemuio_add_command(&aio_flush_cmd); | ||
930 | qemuio_add_command(&flush_cmd); | ||
931 | + qemuio_add_command(&zone_report_cmd); | ||
932 | + qemuio_add_command(&zone_open_cmd); | ||
933 | + qemuio_add_command(&zone_close_cmd); | ||
934 | + qemuio_add_command(&zone_finish_cmd); | ||
935 | + qemuio_add_command(&zone_reset_cmd); | ||
936 | qemuio_add_command(&truncate_cmd); | ||
937 | qemuio_add_command(&length_cmd); | ||
938 | qemuio_add_command(&info_cmd); | ||
197 | -- | 939 | -- |
198 | 2.34.1 | 940 | 2.40.0 |
199 | 941 | ||
200 | 942 | diff view generated by jsdifflib |
1 | The difference between ->handle_output() and ->handle_aio_output() was | 1 | From: Sam Li <faithilikerun@gmail.com> |
---|---|---|---|
2 | that ->handle_aio_output() returned a bool return value indicating | ||
3 | progress. This was needed by the old polling API but now that the bool | ||
4 | return value is gone, the two functions can be unified. | ||
5 | 2 | ||
3 | raw-format driver usually sits on top of file-posix driver. It needs to | ||
4 | pass through requests of zone commands. | ||
5 | |||
6 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com> | ||
9 | Reviewed-by: Hannes Reinecke <hare@suse.de> | ||
10 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
11 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | 13 | Message-id: 20230427172019.3345-5-faithilikerun@gmail.com |
8 | Message-id: 20211207132336.36627-6-stefanha@redhat.com | 14 | Message-id: 20230324090605.28361-5-faithilikerun@gmail.com |
15 | [Adjust commit message prefix as suggested by Philippe Mathieu-Daudé | ||
16 | <philmd@linaro.org>. | ||
17 | --Stefan] | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 18 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
10 | --- | 19 | --- |
11 | hw/virtio/virtio.c | 33 +++------------------------------ | 20 | block/raw-format.c | 17 +++++++++++++++++ |
12 | 1 file changed, 3 insertions(+), 30 deletions(-) | 21 | 1 file changed, 17 insertions(+) |
13 | 22 | ||
14 | diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c | 23 | diff --git a/block/raw-format.c b/block/raw-format.c |
15 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
16 | --- a/hw/virtio/virtio.c | 25 | --- a/block/raw-format.c |
17 | +++ b/hw/virtio/virtio.c | 26 | +++ b/block/raw-format.c |
18 | @@ -XXX,XX +XXX,XX @@ struct VirtQueue | 27 | @@ -XXX,XX +XXX,XX @@ raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) |
19 | 28 | return bdrv_co_pdiscard(bs->file, offset, bytes); | |
20 | uint16_t vector; | ||
21 | VirtIOHandleOutput handle_output; | ||
22 | - VirtIOHandleOutput handle_aio_output; | ||
23 | VirtIODevice *vdev; | ||
24 | EventNotifier guest_notifier; | ||
25 | EventNotifier host_notifier; | ||
26 | @@ -XXX,XX +XXX,XX @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) | ||
27 | } | ||
28 | } | 29 | } |
29 | 30 | ||
30 | -static void virtio_queue_notify_aio_vq(VirtQueue *vq) | 31 | +static int coroutine_fn GRAPH_RDLOCK |
31 | -{ | 32 | +raw_co_zone_report(BlockDriverState *bs, int64_t offset, |
32 | - if (vq->vring.desc && vq->handle_aio_output) { | 33 | + unsigned int *nr_zones, |
33 | - VirtIODevice *vdev = vq->vdev; | 34 | + BlockZoneDescriptor *zones) |
34 | - | 35 | +{ |
35 | - trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); | 36 | + return bdrv_co_zone_report(bs->file->bs, offset, nr_zones, zones); |
36 | - vq->handle_aio_output(vdev, vq); | 37 | +} |
37 | - | 38 | + |
38 | - if (unlikely(vdev->start_on_kick)) { | 39 | +static int coroutine_fn GRAPH_RDLOCK |
39 | - virtio_set_started(vdev, true); | 40 | +raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, |
40 | - } | 41 | + int64_t offset, int64_t len) |
41 | - } | 42 | +{ |
42 | -} | 43 | + return bdrv_co_zone_mgmt(bs->file->bs, op, offset, len); |
43 | - | 44 | +} |
44 | static void virtio_queue_notify_vq(VirtQueue *vq) | 45 | + |
46 | static int64_t coroutine_fn GRAPH_RDLOCK | ||
47 | raw_co_getlength(BlockDriverState *bs) | ||
45 | { | 48 | { |
46 | if (vq->vring.desc && vq->handle_output) { | 49 | @@ -XXX,XX +XXX,XX @@ BlockDriver bdrv_raw = { |
47 | @@ -XXX,XX +XXX,XX @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, | 50 | .bdrv_co_pwritev = &raw_co_pwritev, |
48 | vdev->vq[i].vring.num_default = queue_size; | 51 | .bdrv_co_pwrite_zeroes = &raw_co_pwrite_zeroes, |
49 | vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN; | 52 | .bdrv_co_pdiscard = &raw_co_pdiscard, |
50 | vdev->vq[i].handle_output = handle_output; | 53 | + .bdrv_co_zone_report = &raw_co_zone_report, |
51 | - vdev->vq[i].handle_aio_output = NULL; | 54 | + .bdrv_co_zone_mgmt = &raw_co_zone_mgmt, |
52 | vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) * | 55 | .bdrv_co_block_status = &raw_co_block_status, |
53 | queue_size); | 56 | .bdrv_co_copy_range_from = &raw_co_copy_range_from, |
54 | 57 | .bdrv_co_copy_range_to = &raw_co_copy_range_to, | |
55 | @@ -XXX,XX +XXX,XX @@ void virtio_delete_queue(VirtQueue *vq) | ||
56 | vq->vring.num = 0; | ||
57 | vq->vring.num_default = 0; | ||
58 | vq->handle_output = NULL; | ||
59 | - vq->handle_aio_output = NULL; | ||
60 | g_free(vq->used_elems); | ||
61 | vq->used_elems = NULL; | ||
62 | virtio_virtqueue_reset_region_cache(vq); | ||
63 | @@ -XXX,XX +XXX,XX @@ EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) | ||
64 | return &vq->guest_notifier; | ||
65 | } | ||
66 | |||
67 | -static void virtio_queue_host_notifier_aio_read(EventNotifier *n) | ||
68 | -{ | ||
69 | - VirtQueue *vq = container_of(n, VirtQueue, host_notifier); | ||
70 | - if (event_notifier_test_and_clear(n)) { | ||
71 | - virtio_queue_notify_aio_vq(vq); | ||
72 | - } | ||
73 | -} | ||
74 | - | ||
75 | static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n) | ||
76 | { | ||
77 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); | ||
78 | @@ -XXX,XX +XXX,XX @@ static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n) | ||
79 | { | ||
80 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); | ||
81 | |||
82 | - virtio_queue_notify_aio_vq(vq); | ||
83 | + virtio_queue_notify_vq(vq); | ||
84 | } | ||
85 | |||
86 | static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) | ||
87 | @@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | ||
88 | VirtIOHandleOutput handle_output) | ||
89 | { | ||
90 | if (handle_output) { | ||
91 | - vq->handle_aio_output = handle_output; | ||
92 | aio_set_event_notifier(ctx, &vq->host_notifier, true, | ||
93 | - virtio_queue_host_notifier_aio_read, | ||
94 | + virtio_queue_host_notifier_read, | ||
95 | virtio_queue_host_notifier_aio_poll, | ||
96 | virtio_queue_host_notifier_aio_poll_ready); | ||
97 | aio_set_event_notifier_poll(ctx, &vq->host_notifier, | ||
98 | @@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | ||
99 | aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); | ||
100 | /* Test and clear notifier before after disabling event, | ||
101 | * in case poll callback didn't have time to run. */ | ||
102 | - virtio_queue_host_notifier_aio_read(&vq->host_notifier); | ||
103 | - vq->handle_aio_output = NULL; | ||
104 | + virtio_queue_host_notifier_read(&vq->host_notifier); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | -- | 58 | -- |
109 | 2.34.1 | 59 | 2.40.0 |
110 | 60 | ||
111 | 61 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Putting zoned/non-zoned BlockDrivers on top of each other is not | ||
4 | allowed. | ||
5 | |||
6 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Reviewed-by: Hannes Reinecke <hare@suse.de> | ||
9 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
10 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Message-id: 20230427172019.3345-6-faithilikerun@gmail.com | ||
13 | Message-id: 20230324090605.28361-6-faithilikerun@gmail.com | ||
14 | [Adjust commit message prefix as suggested by Philippe Mathieu-Daudé | ||
15 | <philmd@linaro.org> and clarify that the check is about zoned | ||
16 | BlockDrivers. | ||
17 | --Stefan] | ||
18 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
19 | --- | ||
20 | include/block/block_int-common.h | 5 +++++ | ||
21 | block.c | 19 +++++++++++++++++++ | ||
22 | block/file-posix.c | 12 ++++++++++++ | ||
23 | block/raw-format.c | 1 + | ||
24 | 4 files changed, 37 insertions(+) | ||
25 | |||
26 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/include/block/block_int-common.h | ||
29 | +++ b/include/block/block_int-common.h | ||
30 | @@ -XXX,XX +XXX,XX @@ struct BlockDriver { | ||
31 | */ | ||
32 | bool is_format; | ||
33 | |||
34 | + /* | ||
35 | + * Set to true if the BlockDriver supports zoned children. | ||
36 | + */ | ||
37 | + bool supports_zoned_children; | ||
38 | + | ||
39 | /* | ||
40 | * Drivers not implementing bdrv_parse_filename nor bdrv_open should have | ||
41 | * this field set to true, except ones that are defined only by their | ||
42 | diff --git a/block.c b/block.c | ||
43 | index XXXXXXX..XXXXXXX 100644 | ||
44 | --- a/block.c | ||
45 | +++ b/block.c | ||
46 | @@ -XXX,XX +XXX,XX @@ void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, | ||
47 | return; | ||
48 | } | ||
49 | |||
50 | + /* | ||
51 | + * Non-zoned block drivers do not follow zoned storage constraints | ||
52 | + * (i.e. sequential writes to zones). Refuse mixing zoned and non-zoned | ||
53 | + * drivers in a graph. | ||
54 | + */ | ||
55 | + if (!parent_bs->drv->supports_zoned_children && | ||
56 | + child_bs->bl.zoned == BLK_Z_HM) { | ||
57 | + /* | ||
58 | + * The host-aware model allows zoned storage constraints and random | ||
59 | + * write. Allow mixing host-aware and non-zoned drivers. Using | ||
60 | + * host-aware device as a regular device. | ||
61 | + */ | ||
62 | + error_setg(errp, "Cannot add a %s child to a %s parent", | ||
63 | + child_bs->bl.zoned == BLK_Z_HM ? "zoned" : "non-zoned", | ||
64 | + parent_bs->drv->supports_zoned_children ? | ||
65 | + "support zoned children" : "not support zoned children"); | ||
66 | + return; | ||
67 | + } | ||
68 | + | ||
69 | if (!QLIST_EMPTY(&child_bs->parents)) { | ||
70 | error_setg(errp, "The node %s already has a parent", | ||
71 | child_bs->node_name); | ||
72 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
73 | index XXXXXXX..XXXXXXX 100644 | ||
74 | --- a/block/file-posix.c | ||
75 | +++ b/block/file-posix.c | ||
76 | @@ -XXX,XX +XXX,XX @@ static int raw_open_common(BlockDriverState *bs, QDict *options, | ||
77 | goto fail; | ||
78 | } | ||
79 | } | ||
80 | +#ifdef CONFIG_BLKZONED | ||
81 | + /* | ||
82 | + * The kernel page cache does not reliably work for writes to SWR zones | ||
83 | + * of zoned block device because it can not guarantee the order of writes. | ||
84 | + */ | ||
85 | + if ((bs->bl.zoned != BLK_Z_NONE) && | ||
86 | + (!(s->open_flags & O_DIRECT))) { | ||
87 | + error_setg(errp, "The driver supports zoned devices, and it requires " | ||
88 | + "cache.direct=on, which was not specified."); | ||
89 | + return -EINVAL; /* No host kernel page cache */ | ||
90 | + } | ||
91 | +#endif | ||
92 | |||
93 | if (S_ISBLK(st.st_mode)) { | ||
94 | #ifdef __linux__ | ||
95 | diff --git a/block/raw-format.c b/block/raw-format.c | ||
96 | index XXXXXXX..XXXXXXX 100644 | ||
97 | --- a/block/raw-format.c | ||
98 | +++ b/block/raw-format.c | ||
99 | @@ -XXX,XX +XXX,XX @@ static void raw_child_perm(BlockDriverState *bs, BdrvChild *c, | ||
100 | BlockDriver bdrv_raw = { | ||
101 | .format_name = "raw", | ||
102 | .instance_size = sizeof(BDRVRawState), | ||
103 | + .supports_zoned_children = true, | ||
104 | .bdrv_probe = &raw_probe, | ||
105 | .bdrv_reopen_prepare = &raw_reopen_prepare, | ||
106 | .bdrv_reopen_commit = &raw_reopen_commit, | ||
107 | -- | ||
108 | 2.40.0 | ||
109 | |||
110 | diff view generated by jsdifflib |
1 | Prepare virtio_scsi_handle_cmd() to be used by both dataplane and | 1 | From: Sam Li <faithilikerun@gmail.com> |
---|---|---|---|
2 | non-dataplane by making the condition for starting ioeventfd more | ||
3 | specific. This way it won't trigger when dataplane has already been | ||
4 | started. | ||
5 | 2 | ||
3 | The new block layer APIs of zoned block devices can be tested by: | ||
4 | $ tests/qemu-iotests/check zoned | ||
5 | Run each zone operation on a newly created null_blk device | ||
6 | and see whether it outputs the same zone information. | ||
7 | |||
8 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
6 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
7 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | 12 | Message-id: 20230427172019.3345-7-faithilikerun@gmail.com |
8 | Message-id: 20211207132336.36627-5-stefanha@redhat.com | 13 | Message-id: 20230324090605.28361-7-faithilikerun@gmail.com |
14 | [Adjust commit message prefix as suggested by Philippe Mathieu-Daudé | ||
15 | <philmd@linaro.org>. | ||
16 | --Stefan] | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 17 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
10 | --- | 18 | --- |
11 | hw/scsi/virtio-scsi.c | 2 +- | 19 | tests/qemu-iotests/tests/zoned | 89 ++++++++++++++++++++++++++++++ |
12 | 1 file changed, 1 insertion(+), 1 deletion(-) | 20 | tests/qemu-iotests/tests/zoned.out | 53 ++++++++++++++++++ |
21 | 2 files changed, 142 insertions(+) | ||
22 | create mode 100755 tests/qemu-iotests/tests/zoned | ||
23 | create mode 100644 tests/qemu-iotests/tests/zoned.out | ||
13 | 24 | ||
14 | diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c | 25 | diff --git a/tests/qemu-iotests/tests/zoned b/tests/qemu-iotests/tests/zoned |
15 | index XXXXXXX..XXXXXXX 100644 | 26 | new file mode 100755 |
16 | --- a/hw/scsi/virtio-scsi.c | 27 | index XXXXXXX..XXXXXXX |
17 | +++ b/hw/scsi/virtio-scsi.c | 28 | --- /dev/null |
18 | @@ -XXX,XX +XXX,XX @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) | 29 | +++ b/tests/qemu-iotests/tests/zoned |
19 | /* use non-QOM casts in the data path */ | 30 | @@ -XXX,XX +XXX,XX @@ |
20 | VirtIOSCSI *s = (VirtIOSCSI *)vdev; | 31 | +#!/usr/bin/env bash |
21 | 32 | +# | |
22 | - if (s->ctx) { | 33 | +# Test zone management operations. |
23 | + if (s->ctx && !s->dataplane_started) { | 34 | +# |
24 | virtio_device_start_ioeventfd(vdev); | 35 | + |
25 | if (!s->dataplane_fenced) { | 36 | +seq="$(basename $0)" |
26 | return; | 37 | +echo "QA output created by $seq" |
38 | +status=1 # failure is the default! | ||
39 | + | ||
40 | +_cleanup() | ||
41 | +{ | ||
42 | + _cleanup_test_img | ||
43 | + sudo -n rmmod null_blk | ||
44 | +} | ||
45 | +trap "_cleanup; exit \$status" 0 1 2 3 15 | ||
46 | + | ||
47 | +# get standard environment, filters and checks | ||
48 | +. ../common.rc | ||
49 | +. ../common.filter | ||
50 | +. ../common.qemu | ||
51 | + | ||
52 | +# This test only runs on Linux hosts with raw image files. | ||
53 | +_supported_fmt raw | ||
54 | +_supported_proto file | ||
55 | +_supported_os Linux | ||
56 | + | ||
57 | +sudo -n true || \ | ||
58 | + _notrun 'Password-less sudo required' | ||
59 | + | ||
60 | +IMG="--image-opts -n driver=host_device,filename=/dev/nullb0" | ||
61 | +QEMU_IO_OPTIONS=$QEMU_IO_OPTIONS_NO_FMT | ||
62 | + | ||
63 | +echo "Testing a null_blk device:" | ||
64 | +echo "case 1: if the operations work" | ||
65 | +sudo -n modprobe null_blk nr_devices=1 zoned=1 | ||
66 | +sudo -n chmod 0666 /dev/nullb0 | ||
67 | + | ||
68 | +echo "(1) report the first zone:" | ||
69 | +$QEMU_IO $IMG -c "zrp 0 1" | ||
70 | +echo | ||
71 | +echo "report the first 10 zones" | ||
72 | +$QEMU_IO $IMG -c "zrp 0 10" | ||
73 | +echo | ||
74 | +echo "report the last zone:" | ||
75 | +$QEMU_IO $IMG -c "zrp 0x3e70000000 2" # 0x3e70000000 / 512 = 0x1f380000 | ||
76 | +echo | ||
77 | +echo | ||
78 | +echo "(2) opening the first zone" | ||
79 | +$QEMU_IO $IMG -c "zo 0 268435456" # 268435456 / 512 = 524288 | ||
80 | +echo "report after:" | ||
81 | +$QEMU_IO $IMG -c "zrp 0 1" | ||
82 | +echo | ||
83 | +echo "opening the second zone" | ||
84 | +$QEMU_IO $IMG -c "zo 268435456 268435456" # | ||
85 | +echo "report after:" | ||
86 | +$QEMU_IO $IMG -c "zrp 268435456 1" | ||
87 | +echo | ||
88 | +echo "opening the last zone" | ||
89 | +$QEMU_IO $IMG -c "zo 0x3e70000000 268435456" | ||
90 | +echo "report after:" | ||
91 | +$QEMU_IO $IMG -c "zrp 0x3e70000000 2" | ||
92 | +echo | ||
93 | +echo | ||
94 | +echo "(3) closing the first zone" | ||
95 | +$QEMU_IO $IMG -c "zc 0 268435456" | ||
96 | +echo "report after:" | ||
97 | +$QEMU_IO $IMG -c "zrp 0 1" | ||
98 | +echo | ||
99 | +echo "closing the last zone" | ||
100 | +$QEMU_IO $IMG -c "zc 0x3e70000000 268435456" | ||
101 | +echo "report after:" | ||
102 | +$QEMU_IO $IMG -c "zrp 0x3e70000000 2" | ||
103 | +echo | ||
104 | +echo | ||
105 | +echo "(4) finishing the second zone" | ||
106 | +$QEMU_IO $IMG -c "zf 268435456 268435456" | ||
107 | +echo "After finishing a zone:" | ||
108 | +$QEMU_IO $IMG -c "zrp 268435456 1" | ||
109 | +echo | ||
110 | +echo | ||
111 | +echo "(5) resetting the second zone" | ||
112 | +$QEMU_IO $IMG -c "zrs 268435456 268435456" | ||
113 | +echo "After resetting a zone:" | ||
114 | +$QEMU_IO $IMG -c "zrp 268435456 1" | ||
115 | + | ||
116 | +# success, all done | ||
117 | +echo "*** done" | ||
118 | +rm -f $seq.full | ||
119 | +status=0 | ||
120 | diff --git a/tests/qemu-iotests/tests/zoned.out b/tests/qemu-iotests/tests/zoned.out | ||
121 | new file mode 100644 | ||
122 | index XXXXXXX..XXXXXXX | ||
123 | --- /dev/null | ||
124 | +++ b/tests/qemu-iotests/tests/zoned.out | ||
125 | @@ -XXX,XX +XXX,XX @@ | ||
126 | +QA output created by zoned | ||
127 | +Testing a null_blk device: | ||
128 | +case 1: if the operations work | ||
129 | +(1) report the first zone: | ||
130 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] | ||
131 | + | ||
132 | +report the first 10 zones | ||
133 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] | ||
134 | +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:1, [type: 2] | ||
135 | +start: 0x100000, len 0x80000, cap 0x80000, wptr 0x100000, zcond:1, [type: 2] | ||
136 | +start: 0x180000, len 0x80000, cap 0x80000, wptr 0x180000, zcond:1, [type: 2] | ||
137 | +start: 0x200000, len 0x80000, cap 0x80000, wptr 0x200000, zcond:1, [type: 2] | ||
138 | +start: 0x280000, len 0x80000, cap 0x80000, wptr 0x280000, zcond:1, [type: 2] | ||
139 | +start: 0x300000, len 0x80000, cap 0x80000, wptr 0x300000, zcond:1, [type: 2] | ||
140 | +start: 0x380000, len 0x80000, cap 0x80000, wptr 0x380000, zcond:1, [type: 2] | ||
141 | +start: 0x400000, len 0x80000, cap 0x80000, wptr 0x400000, zcond:1, [type: 2] | ||
142 | +start: 0x480000, len 0x80000, cap 0x80000, wptr 0x480000, zcond:1, [type: 2] | ||
143 | + | ||
144 | +report the last zone: | ||
145 | +start: 0x1f380000, len 0x80000, cap 0x80000, wptr 0x1f380000, zcond:1, [type: 2] | ||
146 | + | ||
147 | + | ||
148 | +(2) opening the first zone | ||
149 | +report after: | ||
150 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:3, [type: 2] | ||
151 | + | ||
152 | +opening the second zone | ||
153 | +report after: | ||
154 | +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:3, [type: 2] | ||
155 | + | ||
156 | +opening the last zone | ||
157 | +report after: | ||
158 | +start: 0x1f380000, len 0x80000, cap 0x80000, wptr 0x1f380000, zcond:3, [type: 2] | ||
159 | + | ||
160 | + | ||
161 | +(3) closing the first zone | ||
162 | +report after: | ||
163 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] | ||
164 | + | ||
165 | +closing the last zone | ||
166 | +report after: | ||
167 | +start: 0x1f380000, len 0x80000, cap 0x80000, wptr 0x1f380000, zcond:1, [type: 2] | ||
168 | + | ||
169 | + | ||
170 | +(4) finishing the second zone | ||
171 | +After finishing a zone: | ||
172 | +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x100000, zcond:14, [type: 2] | ||
173 | + | ||
174 | + | ||
175 | +(5) resetting the second zone | ||
176 | +After resetting a zone: | ||
177 | +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:1, [type: 2] | ||
178 | +*** done | ||
27 | -- | 179 | -- |
28 | 2.34.1 | 180 | 2.40.0 |
29 | 181 | ||
30 | 182 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
4 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
5 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
6 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
7 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Message-id: 20230427172019.3345-8-faithilikerun@gmail.com | ||
9 | Message-id: 20230324090605.28361-8-faithilikerun@gmail.com | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
11 | --- | ||
12 | block/file-posix.c | 3 +++ | ||
13 | block/trace-events | 2 ++ | ||
14 | 2 files changed, 5 insertions(+) | ||
15 | |||
16 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/file-posix.c | ||
19 | +++ b/block/file-posix.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset, | ||
21 | }, | ||
22 | }; | ||
23 | |||
24 | + trace_zbd_zone_report(bs, *nr_zones, offset >> BDRV_SECTOR_BITS); | ||
25 | return raw_thread_pool_submit(handle_aiocb_zone_report, &acb); | ||
26 | } | ||
27 | #endif | ||
28 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
29 | }, | ||
30 | }; | ||
31 | |||
32 | + trace_zbd_zone_mgmt(bs, op_name, offset >> BDRV_SECTOR_BITS, | ||
33 | + len >> BDRV_SECTOR_BITS); | ||
34 | ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb); | ||
35 | if (ret != 0) { | ||
36 | error_report("ioctl %s failed %d", op_name, ret); | ||
37 | diff --git a/block/trace-events b/block/trace-events | ||
38 | index XXXXXXX..XXXXXXX 100644 | ||
39 | --- a/block/trace-events | ||
40 | +++ b/block/trace-events | ||
41 | @@ -XXX,XX +XXX,XX @@ file_FindEjectableOpticalMedia(const char *media) "Matching using %s" | ||
42 | file_setup_cdrom(const char *partition) "Using %s as optical disc" | ||
43 | file_hdev_is_sg(int type, int version) "SG device found: type=%d, version=%d" | ||
44 | file_flush_fdatasync_failed(int err) "errno %d" | ||
45 | +zbd_zone_report(void *bs, unsigned int nr_zones, int64_t sector) "bs %p report %d zones starting at sector offset 0x%" PRIx64 "" | ||
46 | +zbd_zone_mgmt(void *bs, const char *op_name, int64_t sector, int64_t len) "bs %p %s starts at sector offset 0x%" PRIx64 " over a range of 0x%" PRIx64 " sectors" | ||
47 | |||
48 | # ssh.c | ||
49 | sftp_error(const char *op, const char *ssh_err, int ssh_err_code, int sftp_err_code) "%s failed: %s (libssh error code: %d, sftp error code: %d)" | ||
50 | -- | ||
51 | 2.40.0 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Add the documentation about the zoned device support to virtio-blk | ||
4 | emulation. | ||
5 | |||
6 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com> | ||
9 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
10 | Acked-by: Kevin Wolf <kwolf@redhat.com> | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Message-id: 20230427172019.3345-9-faithilikerun@gmail.com | ||
13 | Message-id: 20230324090605.28361-9-faithilikerun@gmail.com | ||
14 | [Add index-api.rst to fix "zoned-storage.rst:document isn't included in | ||
15 | any toctree" error and fix pre-formatted command-line indentation. | ||
16 | --Stefan] | ||
17 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
18 | --- | ||
19 | docs/devel/index-api.rst | 1 + | ||
20 | docs/devel/zoned-storage.rst | 43 ++++++++++++++++++++++++++ | ||
21 | docs/system/qemu-block-drivers.rst.inc | 6 ++++ | ||
22 | 3 files changed, 50 insertions(+) | ||
23 | create mode 100644 docs/devel/zoned-storage.rst | ||
24 | |||
25 | diff --git a/docs/devel/index-api.rst b/docs/devel/index-api.rst | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/docs/devel/index-api.rst | ||
28 | +++ b/docs/devel/index-api.rst | ||
29 | @@ -XXX,XX +XXX,XX @@ generated from in-code annotations to function prototypes. | ||
30 | memory | ||
31 | modules | ||
32 | ui | ||
33 | + zoned-storage | ||
34 | diff --git a/docs/devel/zoned-storage.rst b/docs/devel/zoned-storage.rst | ||
35 | new file mode 100644 | ||
36 | index XXXXXXX..XXXXXXX | ||
37 | --- /dev/null | ||
38 | +++ b/docs/devel/zoned-storage.rst | ||
39 | @@ -XXX,XX +XXX,XX @@ | ||
40 | +============= | ||
41 | +zoned-storage | ||
42 | +============= | ||
43 | + | ||
44 | +Zoned Block Devices (ZBDs) divide the LBA space into block regions called zones | ||
45 | +that are larger than the LBA size. They can only allow sequential writes, which | ||
46 | +can reduce write amplification in SSDs, and potentially lead to higher | ||
47 | +throughput and increased capacity. More details about ZBDs can be found at: | ||
48 | + | ||
49 | +https://zonedstorage.io/docs/introduction/zoned-storage | ||
50 | + | ||
51 | +1. Block layer APIs for zoned storage | ||
52 | +------------------------------------- | ||
53 | +QEMU block layer supports three zoned storage models: | ||
54 | +- BLK_Z_HM: The host-managed zoned model only allows sequential writes access | ||
55 | +to zones. It supports ZBD-specific I/O commands that can be used by a host to | ||
56 | +manage the zones of a device. | ||
57 | +- BLK_Z_HA: The host-aware zoned model allows random write operations in | ||
58 | +zones, making it backward compatible with regular block devices. | ||
59 | +- BLK_Z_NONE: The non-zoned model has no zones support. It includes both | ||
60 | +regular and drive-managed ZBD devices. ZBD-specific I/O commands are not | ||
61 | +supported. | ||
62 | + | ||
63 | +The block device information resides inside BlockDriverState. QEMU uses | ||
64 | +BlockLimits struct(BlockDriverState::bl) that is continuously accessed by the | ||
65 | +block layer while processing I/O requests. A BlockBackend has a root pointer to | ||
66 | +a BlockDriverState graph(for example, raw format on top of file-posix). The | ||
67 | +zoned storage information can be propagated from the leaf BlockDriverState all | ||
68 | +the way up to the BlockBackend. If the zoned storage model in file-posix is | ||
69 | +set to BLK_Z_HM, then block drivers will declare support for zoned host device. | ||
70 | + | ||
71 | +The block layer APIs support commands needed for zoned storage devices, | ||
72 | +including report zones, four zone operations, and zone append. | ||
73 | + | ||
74 | +2. Emulating zoned storage controllers | ||
75 | +-------------------------------------- | ||
76 | +When the BlockBackend's BlockLimits model reports a zoned storage device, users | ||
77 | +like the virtio-blk emulation or the qemu-io-cmds.c utility can use block layer | ||
78 | +APIs for zoned storage emulation or testing. | ||
79 | + | ||
80 | +For example, to test zone_report on a null_blk device using qemu-io is:: | ||
81 | + | ||
82 | + $ path/to/qemu-io --image-opts -n driver=host_device,filename=/dev/nullb0 -c "zrp offset nr_zones" | ||
83 | diff --git a/docs/system/qemu-block-drivers.rst.inc b/docs/system/qemu-block-drivers.rst.inc | ||
84 | index XXXXXXX..XXXXXXX 100644 | ||
85 | --- a/docs/system/qemu-block-drivers.rst.inc | ||
86 | +++ b/docs/system/qemu-block-drivers.rst.inc | ||
87 | @@ -XXX,XX +XXX,XX @@ Hard disks | ||
88 | you may corrupt your host data (use the ``-snapshot`` command | ||
89 | line option or modify the device permissions accordingly). | ||
90 | |||
91 | +Zoned block devices | ||
92 | + Zoned block devices can be passed through to the guest if the emulated storage | ||
93 | + controller supports zoned storage. Use ``--blockdev host_device, | ||
94 | + node-name=drive0,filename=/dev/nullb0,cache.direct=on`` to pass through | ||
95 | + ``/dev/nullb0`` as ``drive0``. | ||
96 | + | ||
97 | Windows | ||
98 | ^^^^^^^ | ||
99 | |||
100 | -- | ||
101 | 2.40.0 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Since Linux doesn't have a user API to issue zone append operations to | ||
4 | zoned devices from user space, the file-posix driver is modified to add | ||
5 | zone append emulation using regular writes. To do this, the file-posix | ||
6 | driver tracks the wp location of all zones of the device. It uses an | ||
7 | array of uint64_t. The most significant bit of each wp location indicates | ||
8 | whether the zone is a conventional zone. | ||
9 | |||
10 | The zones wp can be changed due to the following operations issued: | ||
11 | - zone reset: change the wp to the start offset of that zone | ||
12 | - zone finish: change to the end location of that zone | ||
13 | - write to a zone | ||
14 | - zone append | ||
15 | |||
16 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
17 | Message-id: 20230427172339.3709-2-faithilikerun@gmail.com | ||
18 | [Fix errno propagation from handle_aiocb_zone_mgmt() | ||
19 | --Stefan] | ||
20 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
21 | --- | ||
22 | include/block/block-common.h | 14 +++ | ||
23 | include/block/block_int-common.h | 5 + | ||
24 | block/file-posix.c | 178 ++++++++++++++++++++++++++++++- | ||
25 | 3 files changed, 193 insertions(+), 4 deletions(-) | ||
26 | |||
27 | diff --git a/include/block/block-common.h b/include/block/block-common.h | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/include/block/block-common.h | ||
30 | +++ b/include/block/block-common.h | ||
31 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockZoneDescriptor { | ||
32 | BlockZoneState state; | ||
33 | } BlockZoneDescriptor; | ||
34 | |||
35 | +/* | ||
36 | + * Track write pointers of a zone in bytes. | ||
37 | + */ | ||
38 | +typedef struct BlockZoneWps { | ||
39 | + CoMutex colock; | ||
40 | + uint64_t wp[]; | ||
41 | +} BlockZoneWps; | ||
42 | + | ||
43 | typedef struct BlockDriverInfo { | ||
44 | /* in bytes, 0 if irrelevant */ | ||
45 | int cluster_size; | ||
46 | @@ -XXX,XX +XXX,XX @@ typedef enum { | ||
47 | #define BDRV_SECTOR_BITS 9 | ||
48 | #define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS) | ||
49 | |||
50 | +/* | ||
51 | + * Get the first most significant bit of wp. If it is zero, then | ||
52 | + * the zone type is SWR. | ||
53 | + */ | ||
54 | +#define BDRV_ZT_IS_CONV(wp) (wp & (1ULL << 63)) | ||
55 | + | ||
56 | #define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \ | ||
57 | INT_MAX >> BDRV_SECTOR_BITS) | ||
58 | #define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) | ||
59 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h | ||
60 | index XXXXXXX..XXXXXXX 100644 | ||
61 | --- a/include/block/block_int-common.h | ||
62 | +++ b/include/block/block_int-common.h | ||
63 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockLimits { | ||
64 | |||
65 | /* maximum number of active zones */ | ||
66 | int64_t max_active_zones; | ||
67 | + | ||
68 | + int64_t write_granularity; | ||
69 | } BlockLimits; | ||
70 | |||
71 | typedef struct BdrvOpBlocker BdrvOpBlocker; | ||
72 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
73 | CoMutex bsc_modify_lock; | ||
74 | /* Always non-NULL, but must only be dereferenced under an RCU read guard */ | ||
75 | BdrvBlockStatusCache *block_status_cache; | ||
76 | + | ||
77 | + /* array of write pointers' location of each zone in the zoned device. */ | ||
78 | + BlockZoneWps *wps; | ||
79 | }; | ||
80 | |||
81 | struct BlockBackendRootState { | ||
82 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
83 | index XXXXXXX..XXXXXXX 100644 | ||
84 | --- a/block/file-posix.c | ||
85 | +++ b/block/file-posix.c | ||
86 | @@ -XXX,XX +XXX,XX @@ static int hdev_get_max_segments(int fd, struct stat *st) | ||
87 | } | ||
88 | |||
89 | #if defined(CONFIG_BLKZONED) | ||
90 | +/* | ||
91 | + * If the reset_all flag is true, then the wps of zone whose state is | ||
92 | + * not readonly or offline should be all reset to the start sector. | ||
93 | + * Else, take the real wp of the device. | ||
94 | + */ | ||
95 | +static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset, | ||
96 | + unsigned int nrz, bool reset_all) | ||
97 | +{ | ||
98 | + struct blk_zone *blkz; | ||
99 | + size_t rep_size; | ||
100 | + uint64_t sector = offset >> BDRV_SECTOR_BITS; | ||
101 | + BlockZoneWps *wps = bs->wps; | ||
102 | + unsigned int j = offset / bs->bl.zone_size; | ||
103 | + unsigned int n = 0, i = 0; | ||
104 | + int ret; | ||
105 | + rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone); | ||
106 | + g_autofree struct blk_zone_report *rep = NULL; | ||
107 | + | ||
108 | + rep = g_malloc(rep_size); | ||
109 | + blkz = (struct blk_zone *)(rep + 1); | ||
110 | + while (n < nrz) { | ||
111 | + memset(rep, 0, rep_size); | ||
112 | + rep->sector = sector; | ||
113 | + rep->nr_zones = nrz - n; | ||
114 | + | ||
115 | + do { | ||
116 | + ret = ioctl(fd, BLKREPORTZONE, rep); | ||
117 | + } while (ret != 0 && errno == EINTR); | ||
118 | + if (ret != 0) { | ||
119 | + error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d", | ||
120 | + fd, offset, errno); | ||
121 | + return -errno; | ||
122 | + } | ||
123 | + | ||
124 | + if (!rep->nr_zones) { | ||
125 | + break; | ||
126 | + } | ||
127 | + | ||
128 | + for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) { | ||
129 | + /* | ||
130 | + * The wp tracking cares only about sequential writes required and | ||
131 | + * sequential write preferred zones so that the wp can advance to | ||
132 | + * the right location. | ||
133 | + * Use the most significant bit of the wp location to indicate the | ||
134 | + * zone type: 0 for SWR/SWP zones and 1 for conventional zones. | ||
135 | + */ | ||
136 | + if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) { | ||
137 | + wps->wp[j] |= 1ULL << 63; | ||
138 | + } else { | ||
139 | + switch(blkz[i].cond) { | ||
140 | + case BLK_ZONE_COND_FULL: | ||
141 | + case BLK_ZONE_COND_READONLY: | ||
142 | + /* Zone not writable */ | ||
143 | + wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS; | ||
144 | + break; | ||
145 | + case BLK_ZONE_COND_OFFLINE: | ||
146 | + /* Zone not writable nor readable */ | ||
147 | + wps->wp[j] = (blkz[i].start) << BDRV_SECTOR_BITS; | ||
148 | + break; | ||
149 | + default: | ||
150 | + if (reset_all) { | ||
151 | + wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS; | ||
152 | + } else { | ||
153 | + wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS; | ||
154 | + } | ||
155 | + break; | ||
156 | + } | ||
157 | + } | ||
158 | + } | ||
159 | + sector = blkz[i - 1].start + blkz[i - 1].len; | ||
160 | + } | ||
161 | + | ||
162 | + return 0; | ||
163 | +} | ||
164 | + | ||
165 | +static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset, | ||
166 | + unsigned int nrz) | ||
167 | +{ | ||
168 | + if (get_zones_wp(bs, fd, offset, nrz, 0) < 0) { | ||
169 | + error_report("update zone wp failed"); | ||
170 | + } | ||
171 | +} | ||
172 | + | ||
173 | static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, | ||
174 | Error **errp) | ||
175 | { | ||
176 | + BDRVRawState *s = bs->opaque; | ||
177 | BlockZoneModel zoned; | ||
178 | int ret; | ||
179 | |||
180 | @@ -XXX,XX +XXX,XX @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, | ||
181 | if (ret > 0) { | ||
182 | bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS; | ||
183 | } | ||
184 | + | ||
185 | + ret = get_sysfs_long_val(st, "physical_block_size"); | ||
186 | + if (ret >= 0) { | ||
187 | + bs->bl.write_granularity = ret; | ||
188 | + } | ||
189 | + | ||
190 | + /* The refresh_limits() function can be called multiple times. */ | ||
191 | + g_free(bs->wps); | ||
192 | + bs->wps = g_malloc(sizeof(BlockZoneWps) + | ||
193 | + sizeof(int64_t) * bs->bl.nr_zones); | ||
194 | + ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 0); | ||
195 | + if (ret < 0) { | ||
196 | + error_setg_errno(errp, -ret, "report wps failed"); | ||
197 | + bs->wps = NULL; | ||
198 | + return; | ||
199 | + } | ||
200 | + qemu_co_mutex_init(&bs->wps->colock); | ||
201 | } | ||
202 | #else /* !defined(CONFIG_BLKZONED) */ | ||
203 | static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st, | ||
204 | @@ -XXX,XX +XXX,XX @@ static int handle_aiocb_zone_mgmt(void *opaque) | ||
205 | ret = ioctl(fd, aiocb->zone_mgmt.op, &range); | ||
206 | } while (ret != 0 && errno == EINTR); | ||
207 | |||
208 | - return ret; | ||
209 | + return ret < 0 ? -errno : ret; | ||
210 | } | ||
211 | #endif | ||
212 | |||
213 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, | ||
214 | { | ||
215 | BDRVRawState *s = bs->opaque; | ||
216 | RawPosixAIOData acb; | ||
217 | + int ret; | ||
218 | |||
219 | if (fd_open(bs) < 0) | ||
220 | return -EIO; | ||
221 | +#if defined(CONFIG_BLKZONED) | ||
222 | + if (type & QEMU_AIO_WRITE && bs->wps) { | ||
223 | + qemu_co_mutex_lock(&bs->wps->colock); | ||
224 | + } | ||
225 | +#endif | ||
226 | |||
227 | /* | ||
228 | * When using O_DIRECT, the request must be aligned to be able to use | ||
229 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, | ||
230 | #ifdef CONFIG_LINUX_IO_URING | ||
231 | } else if (s->use_linux_io_uring) { | ||
232 | assert(qiov->size == bytes); | ||
233 | - return luring_co_submit(bs, s->fd, offset, qiov, type); | ||
234 | + ret = luring_co_submit(bs, s->fd, offset, qiov, type); | ||
235 | + goto out; | ||
236 | #endif | ||
237 | #ifdef CONFIG_LINUX_AIO | ||
238 | } else if (s->use_linux_aio) { | ||
239 | assert(qiov->size == bytes); | ||
240 | - return laio_co_submit(s->fd, offset, qiov, type, s->aio_max_batch); | ||
241 | + ret = laio_co_submit(s->fd, offset, qiov, type, | ||
242 | + s->aio_max_batch); | ||
243 | + goto out; | ||
244 | #endif | ||
245 | } | ||
246 | |||
247 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, | ||
248 | }; | ||
249 | |||
250 | assert(qiov->size == bytes); | ||
251 | - return raw_thread_pool_submit(handle_aiocb_rw, &acb); | ||
252 | + ret = raw_thread_pool_submit(handle_aiocb_rw, &acb); | ||
253 | + goto out; /* Avoid the compiler err of unused label */ | ||
254 | + | ||
255 | +out: | ||
256 | +#if defined(CONFIG_BLKZONED) | ||
257 | +{ | ||
258 | + BlockZoneWps *wps = bs->wps; | ||
259 | + if (ret == 0) { | ||
260 | + if (type & QEMU_AIO_WRITE && wps && bs->bl.zone_size) { | ||
261 | + uint64_t *wp = &wps->wp[offset / bs->bl.zone_size]; | ||
262 | + if (!BDRV_ZT_IS_CONV(*wp)) { | ||
263 | + /* Advance the wp if needed */ | ||
264 | + if (offset + bytes > *wp) { | ||
265 | + *wp = offset + bytes; | ||
266 | + } | ||
267 | + } | ||
268 | + } | ||
269 | + } else { | ||
270 | + if (type & QEMU_AIO_WRITE) { | ||
271 | + update_zones_wp(bs, s->fd, 0, 1); | ||
272 | + } | ||
273 | + } | ||
274 | + | ||
275 | + if (type & QEMU_AIO_WRITE && wps) { | ||
276 | + qemu_co_mutex_unlock(&wps->colock); | ||
277 | + } | ||
278 | +} | ||
279 | +#endif | ||
280 | + return ret; | ||
281 | } | ||
282 | |||
283 | static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, | ||
284 | @@ -XXX,XX +XXX,XX @@ static void raw_close(BlockDriverState *bs) | ||
285 | BDRVRawState *s = bs->opaque; | ||
286 | |||
287 | if (s->fd >= 0) { | ||
288 | +#if defined(CONFIG_BLKZONED) | ||
289 | + g_free(bs->wps); | ||
290 | +#endif | ||
291 | qemu_close(s->fd); | ||
292 | s->fd = -1; | ||
293 | } | ||
294 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
295 | const char *op_name; | ||
296 | unsigned long zo; | ||
297 | int ret; | ||
298 | + BlockZoneWps *wps = bs->wps; | ||
299 | int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS; | ||
300 | |||
301 | zone_size = bs->bl.zone_size; | ||
302 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
303 | return -EINVAL; | ||
304 | } | ||
305 | |||
306 | + uint32_t i = offset / bs->bl.zone_size; | ||
307 | + uint32_t nrz = len / bs->bl.zone_size; | ||
308 | + uint64_t *wp = &wps->wp[i]; | ||
309 | + if (BDRV_ZT_IS_CONV(*wp) && len != capacity) { | ||
310 | + error_report("zone mgmt operations are not allowed for conventional zones"); | ||
311 | + return -EIO; | ||
312 | + } | ||
313 | + | ||
314 | switch (op) { | ||
315 | case BLK_ZO_OPEN: | ||
316 | op_name = "BLKOPENZONE"; | ||
317 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
318 | len >> BDRV_SECTOR_BITS); | ||
319 | ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb); | ||
320 | if (ret != 0) { | ||
321 | + update_zones_wp(bs, s->fd, offset, i); | ||
322 | error_report("ioctl %s failed %d", op_name, ret); | ||
323 | + return ret; | ||
324 | + } | ||
325 | + | ||
326 | + if (zo == BLKRESETZONE && len == capacity) { | ||
327 | + ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, 1); | ||
328 | + if (ret < 0) { | ||
329 | + error_report("reporting single wp failed"); | ||
330 | + return ret; | ||
331 | + } | ||
332 | + } else if (zo == BLKRESETZONE) { | ||
333 | + for (unsigned int j = 0; j < nrz; ++j) { | ||
334 | + wp[j] = offset + j * zone_size; | ||
335 | + } | ||
336 | + } else if (zo == BLKFINISHZONE) { | ||
337 | + for (unsigned int j = 0; j < nrz; ++j) { | ||
338 | + /* The zoned device allows the last zone smaller than the | ||
339 | + * zone size. */ | ||
340 | + wp[j] = MIN(offset + (j + 1) * zone_size, offset + len); | ||
341 | + } | ||
342 | } | ||
343 | |||
344 | return ret; | ||
345 | -- | ||
346 | 2.40.0 | diff view generated by jsdifflib |
1 | Adaptive polling measures the execution time of the polling check plus | 1 | From: Sam Li <faithilikerun@gmail.com> |
---|---|---|---|
2 | handlers called when a polled event becomes ready. Handlers can take a | ||
3 | significant amount of time, making it look like polling was running for | ||
4 | a long time when in fact the event handler was running for a long time. | ||
5 | 2 | ||
6 | For example, on Linux the io_submit(2) syscall invoked when a virtio-blk | 3 | A zone append command is a write operation that specifies the first |
7 | device's virtqueue becomes ready can take 10s of microseconds. This | 4 | logical block of a zone as the write position. When writing to a zoned |
8 | can exceed the default polling interval (32 microseconds) and cause | 5 | block device using zone append, the byte offset of the call may point at |
9 | adaptive polling to stop polling. | 6 | any position within the zone to which the data is being appended. Upon |
7 | completion the device will respond with the position where the data has | ||
8 | been written in the zone. | ||
10 | 9 | ||
11 | By excluding the handler's execution time from the polling check we make | 10 | Signed-off-by: Sam Li <faithilikerun@gmail.com> |
12 | the adaptive polling calculation more accurate. As a result, the event | 11 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> |
13 | loop now stays in polling mode where previously it would have fallen | 12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
14 | back to file descriptor monitoring. | 13 | Message-id: 20230427172339.3709-3-faithilikerun@gmail.com |
15 | |||
16 | The following data was collected with virtio-blk num-queues=2 | ||
17 | event_idx=off using an IOThread. Before: | ||
18 | |||
19 | 168k IOPS, IOThread syscalls: | ||
20 | |||
21 | 9837.115 ( 0.020 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 16, iocbpp: 0x7fcb9f937db0) = 16 | ||
22 | 9837.158 ( 0.002 ms): IO iothread1/620155 write(fd: 103, buf: 0x556a2ef71b88, count: 8) = 8 | ||
23 | 9837.161 ( 0.001 ms): IO iothread1/620155 write(fd: 104, buf: 0x556a2ef71b88, count: 8) = 8 | ||
24 | 9837.163 ( 0.001 ms): IO iothread1/620155 ppoll(ufds: 0x7fcb90002800, nfds: 4, tsp: 0x7fcb9f1342d0, sigsetsize: 8) = 3 | ||
25 | 9837.164 ( 0.001 ms): IO iothread1/620155 read(fd: 107, buf: 0x7fcb9f939cc0, count: 512) = 8 | ||
26 | 9837.174 ( 0.001 ms): IO iothread1/620155 read(fd: 105, buf: 0x7fcb9f939cc0, count: 512) = 8 | ||
27 | 9837.176 ( 0.001 ms): IO iothread1/620155 read(fd: 106, buf: 0x7fcb9f939cc0, count: 512) = 8 | ||
28 | 9837.209 ( 0.035 ms): IO iothread1/620155 io_submit(ctx_id: 140512552468480, nr: 32, iocbpp: 0x7fca7d0cebe0) = 32 | ||
29 | |||
30 | 174k IOPS (+3.6%), IOThread syscalls: | ||
31 | |||
32 | 9809.566 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0cdd62be0) = 32 | ||
33 | 9809.625 ( 0.001 ms): IO iothread1/623061 write(fd: 103, buf: 0x5647cfba5f58, count: 8) = 8 | ||
34 | 9809.627 ( 0.002 ms): IO iothread1/623061 write(fd: 104, buf: 0x5647cfba5f58, count: 8) = 8 | ||
35 | 9809.663 ( 0.036 ms): IO iothread1/623061 io_submit(ctx_id: 140539805028352, nr: 32, iocbpp: 0x7fd0d0388b50) = 32 | ||
36 | |||
37 | Notice that ppoll(2) and eventfd read(2) syscalls are eliminated because | ||
38 | the IOThread stays in polling mode instead of falling back to file | ||
39 | descriptor monitoring. | ||
40 | |||
41 | As usual, polling is not implemented on Windows so this patch ignores | ||
42 | the new io_poll_read() callback in aio-win32.c. | ||
43 | |||
44 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
45 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | ||
46 | Message-id: 20211207132336.36627-2-stefanha@redhat.com | ||
47 | |||
48 | [Fixed up aio_set_event_notifier() calls in | ||
49 | tests/unit/test-fdmon-epoll.c added after this series was queued. | ||
50 | --Stefan] | ||
51 | |||
52 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
53 | --- | 15 | --- |
54 | include/block/aio.h | 4 +- | 16 | include/block/block-io.h | 4 ++ |
55 | util/aio-posix.h | 1 + | 17 | include/block/block_int-common.h | 3 ++ |
56 | block/curl.c | 11 +++-- | 18 | include/block/raw-aio.h | 4 +- |
57 | block/export/fuse.c | 4 +- | 19 | include/sysemu/block-backend-io.h | 9 +++++ |
58 | block/io_uring.c | 19 ++++---- | 20 | block/block-backend.c | 61 +++++++++++++++++++++++++++++++ |
59 | block/iscsi.c | 4 +- | 21 | block/file-posix.c | 58 +++++++++++++++++++++++++---- |
60 | block/linux-aio.c | 16 ++++--- | 22 | block/io.c | 27 ++++++++++++++ |
61 | block/nfs.c | 6 +-- | 23 | block/io_uring.c | 4 ++ |
62 | block/nvme.c | 51 +++++++++++++------- | 24 | block/linux-aio.c | 3 ++ |
63 | block/ssh.c | 4 +- | 25 | block/raw-format.c | 8 ++++ |
64 | block/win32-aio.c | 4 +- | 26 | 10 files changed, 173 insertions(+), 8 deletions(-) |
65 | hw/virtio/virtio.c | 16 ++++--- | ||
66 | hw/xen/xen-bus.c | 6 +-- | ||
67 | io/channel-command.c | 6 ++- | ||
68 | io/channel-file.c | 3 +- | ||
69 | io/channel-socket.c | 3 +- | ||
70 | migration/rdma.c | 8 ++-- | ||
71 | tests/unit/test-aio.c | 4 +- | ||
72 | tests/unit/test-fdmon-epoll.c | 4 +- | ||
73 | util/aio-posix.c | 89 ++++++++++++++++++++++++++--------- | ||
74 | util/aio-win32.c | 4 +- | ||
75 | util/async.c | 10 +++- | ||
76 | util/main-loop.c | 4 +- | ||
77 | util/qemu-coroutine-io.c | 5 +- | ||
78 | util/vhost-user-server.c | 11 +++-- | ||
79 | 25 files changed, 193 insertions(+), 104 deletions(-) | ||
80 | 27 | ||
81 | diff --git a/include/block/aio.h b/include/block/aio.h | 28 | diff --git a/include/block/block-io.h b/include/block/block-io.h |
82 | index XXXXXXX..XXXXXXX 100644 | 29 | index XXXXXXX..XXXXXXX 100644 |
83 | --- a/include/block/aio.h | 30 | --- a/include/block/block-io.h |
84 | +++ b/include/block/aio.h | 31 | +++ b/include/block/block-io.h |
85 | @@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx, | 32 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_report(BlockDriverState *bs, |
86 | IOHandler *io_read, | 33 | int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_mgmt(BlockDriverState *bs, |
87 | IOHandler *io_write, | 34 | BlockZoneOp op, |
88 | AioPollFn *io_poll, | 35 | int64_t offset, int64_t len); |
89 | + IOHandler *io_poll_ready, | 36 | +int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_append(BlockDriverState *bs, |
90 | void *opaque); | 37 | + int64_t *offset, |
91 | 38 | + QEMUIOVector *qiov, | |
92 | /* Set polling begin/end callbacks for a file descriptor that has already been | 39 | + BdrvRequestFlags flags); |
93 | @@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx, | 40 | |
94 | EventNotifier *notifier, | 41 | bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs); |
95 | bool is_external, | 42 | int bdrv_block_status(BlockDriverState *bs, int64_t offset, |
96 | EventNotifierHandler *io_read, | 43 | diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h |
97 | - AioPollFn *io_poll); | 44 | index XXXXXXX..XXXXXXX 100644 |
98 | + AioPollFn *io_poll, | 45 | --- a/include/block/block_int-common.h |
99 | + EventNotifierHandler *io_poll_ready); | 46 | +++ b/include/block/block_int-common.h |
100 | 47 | @@ -XXX,XX +XXX,XX @@ struct BlockDriver { | |
101 | /* Set polling begin/end callbacks for an event notifier that has already been | 48 | BlockZoneDescriptor *zones); |
102 | * registered with aio_set_event_notifier. Do nothing if the event notifier is | 49 | int coroutine_fn (*bdrv_co_zone_mgmt)(BlockDriverState *bs, BlockZoneOp op, |
103 | diff --git a/util/aio-posix.h b/util/aio-posix.h | 50 | int64_t offset, int64_t len); |
104 | index XXXXXXX..XXXXXXX 100644 | 51 | + int coroutine_fn (*bdrv_co_zone_append)(BlockDriverState *bs, |
105 | --- a/util/aio-posix.h | 52 | + int64_t *offset, QEMUIOVector *qiov, |
106 | +++ b/util/aio-posix.h | 53 | + BdrvRequestFlags flags); |
107 | @@ -XXX,XX +XXX,XX @@ struct AioHandler { | 54 | |
108 | IOHandler *io_read; | 55 | /* removable device specific */ |
109 | IOHandler *io_write; | 56 | bool coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_is_inserted)( |
110 | AioPollFn *io_poll; | 57 | diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h |
111 | + IOHandler *io_poll_ready; | 58 | index XXXXXXX..XXXXXXX 100644 |
112 | IOHandler *io_poll_begin; | 59 | --- a/include/block/raw-aio.h |
113 | IOHandler *io_poll_end; | 60 | +++ b/include/block/raw-aio.h |
114 | void *opaque; | 61 | @@ -XXX,XX +XXX,XX @@ |
115 | diff --git a/block/curl.c b/block/curl.c | 62 | #define QEMU_AIO_TRUNCATE 0x0080 |
116 | index XXXXXXX..XXXXXXX 100644 | 63 | #define QEMU_AIO_ZONE_REPORT 0x0100 |
117 | --- a/block/curl.c | 64 | #define QEMU_AIO_ZONE_MGMT 0x0200 |
118 | +++ b/block/curl.c | 65 | +#define QEMU_AIO_ZONE_APPEND 0x0400 |
119 | @@ -XXX,XX +XXX,XX @@ static gboolean curl_drop_socket(void *key, void *value, void *opaque) | 66 | #define QEMU_AIO_TYPE_MASK \ |
120 | BDRVCURLState *s = socket->s; | 67 | (QEMU_AIO_READ | \ |
121 | 68 | QEMU_AIO_WRITE | \ | |
122 | aio_set_fd_handler(s->aio_context, socket->fd, false, | 69 | @@ -XXX,XX +XXX,XX @@ |
123 | - NULL, NULL, NULL, NULL); | 70 | QEMU_AIO_COPY_RANGE | \ |
124 | + NULL, NULL, NULL, NULL, NULL); | 71 | QEMU_AIO_TRUNCATE | \ |
125 | return true; | 72 | QEMU_AIO_ZONE_REPORT | \ |
126 | } | 73 | - QEMU_AIO_ZONE_MGMT) |
127 | 74 | + QEMU_AIO_ZONE_MGMT | \ | |
128 | @@ -XXX,XX +XXX,XX @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, | 75 | + QEMU_AIO_ZONE_APPEND) |
129 | switch (action) { | 76 | |
130 | case CURL_POLL_IN: | 77 | /* AIO flags */ |
131 | aio_set_fd_handler(s->aio_context, fd, false, | 78 | #define QEMU_AIO_MISALIGNED 0x1000 |
132 | - curl_multi_do, NULL, NULL, socket); | 79 | diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h |
133 | + curl_multi_do, NULL, NULL, NULL, socket); | 80 | index XXXXXXX..XXXXXXX 100644 |
134 | break; | 81 | --- a/include/sysemu/block-backend-io.h |
135 | case CURL_POLL_OUT: | 82 | +++ b/include/sysemu/block-backend-io.h |
136 | aio_set_fd_handler(s->aio_context, fd, false, | 83 | @@ -XXX,XX +XXX,XX @@ BlockAIOCB *blk_aio_zone_report(BlockBackend *blk, int64_t offset, |
137 | - NULL, curl_multi_do, NULL, socket); | 84 | BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, |
138 | + NULL, curl_multi_do, NULL, NULL, socket); | 85 | int64_t offset, int64_t len, |
139 | break; | 86 | BlockCompletionFunc *cb, void *opaque); |
140 | case CURL_POLL_INOUT: | 87 | +BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset, |
141 | aio_set_fd_handler(s->aio_context, fd, false, | 88 | + QEMUIOVector *qiov, BdrvRequestFlags flags, |
142 | - curl_multi_do, curl_multi_do, NULL, socket); | 89 | + BlockCompletionFunc *cb, void *opaque); |
143 | + curl_multi_do, curl_multi_do, | 90 | BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes, |
144 | + NULL, NULL, socket); | 91 | BlockCompletionFunc *cb, void *opaque); |
145 | break; | 92 | void blk_aio_cancel_async(BlockAIOCB *acb); |
146 | case CURL_POLL_REMOVE: | 93 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, |
147 | aio_set_fd_handler(s->aio_context, fd, false, | 94 | int64_t offset, int64_t len); |
148 | - NULL, NULL, NULL, NULL); | 95 | int co_wrapper_mixed blk_zone_mgmt(BlockBackend *blk, BlockZoneOp op, |
149 | + NULL, NULL, NULL, NULL, NULL); | 96 | int64_t offset, int64_t len); |
150 | break; | 97 | +int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset, |
98 | + QEMUIOVector *qiov, | ||
99 | + BdrvRequestFlags flags); | ||
100 | +int co_wrapper_mixed blk_zone_append(BlockBackend *blk, int64_t *offset, | ||
101 | + QEMUIOVector *qiov, | ||
102 | + BdrvRequestFlags flags); | ||
103 | |||
104 | int co_wrapper_mixed blk_pdiscard(BlockBackend *blk, int64_t offset, | ||
105 | int64_t bytes); | ||
106 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
107 | index XXXXXXX..XXXXXXX 100644 | ||
108 | --- a/block/block-backend.c | ||
109 | +++ b/block/block-backend.c | ||
110 | @@ -XXX,XX +XXX,XX @@ BlockAIOCB *blk_aio_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
111 | return &acb->common; | ||
112 | } | ||
113 | |||
114 | +static void coroutine_fn blk_aio_zone_append_entry(void *opaque) | ||
115 | +{ | ||
116 | + BlkAioEmAIOCB *acb = opaque; | ||
117 | + BlkRwCo *rwco = &acb->rwco; | ||
118 | + | ||
119 | + rwco->ret = blk_co_zone_append(rwco->blk, (int64_t *)(uintptr_t)acb->bytes, | ||
120 | + rwco->iobuf, rwco->flags); | ||
121 | + blk_aio_complete(acb); | ||
122 | +} | ||
123 | + | ||
124 | +BlockAIOCB *blk_aio_zone_append(BlockBackend *blk, int64_t *offset, | ||
125 | + QEMUIOVector *qiov, BdrvRequestFlags flags, | ||
126 | + BlockCompletionFunc *cb, void *opaque) { | ||
127 | + BlkAioEmAIOCB *acb; | ||
128 | + Coroutine *co; | ||
129 | + IO_CODE(); | ||
130 | + | ||
131 | + blk_inc_in_flight(blk); | ||
132 | + acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque); | ||
133 | + acb->rwco = (BlkRwCo) { | ||
134 | + .blk = blk, | ||
135 | + .ret = NOT_DONE, | ||
136 | + .flags = flags, | ||
137 | + .iobuf = qiov, | ||
138 | + }; | ||
139 | + acb->bytes = (int64_t)(uintptr_t)offset; | ||
140 | + acb->has_returned = false; | ||
141 | + | ||
142 | + co = qemu_coroutine_create(blk_aio_zone_append_entry, acb); | ||
143 | + aio_co_enter(blk_get_aio_context(blk), co); | ||
144 | + acb->has_returned = true; | ||
145 | + if (acb->rwco.ret != NOT_DONE) { | ||
146 | + replay_bh_schedule_oneshot_event(blk_get_aio_context(blk), | ||
147 | + blk_aio_complete_bh, acb); | ||
148 | + } | ||
149 | + | ||
150 | + return &acb->common; | ||
151 | +} | ||
152 | + | ||
153 | /* | ||
154 | * Send a zone_report command. | ||
155 | * offset is a byte offset from the start of the device. No alignment | ||
156 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn blk_co_zone_mgmt(BlockBackend *blk, BlockZoneOp op, | ||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | +/* | ||
161 | + * Send a zone_append command. | ||
162 | + */ | ||
163 | +int coroutine_fn blk_co_zone_append(BlockBackend *blk, int64_t *offset, | ||
164 | + QEMUIOVector *qiov, BdrvRequestFlags flags) | ||
165 | +{ | ||
166 | + int ret; | ||
167 | + IO_CODE(); | ||
168 | + | ||
169 | + blk_inc_in_flight(blk); | ||
170 | + blk_wait_while_drained(blk); | ||
171 | + GRAPH_RDLOCK_GUARD(); | ||
172 | + if (!blk_is_available(blk)) { | ||
173 | + blk_dec_in_flight(blk); | ||
174 | + return -ENOMEDIUM; | ||
175 | + } | ||
176 | + | ||
177 | + ret = bdrv_co_zone_append(blk_bs(blk), offset, qiov, flags); | ||
178 | + blk_dec_in_flight(blk); | ||
179 | + return ret; | ||
180 | +} | ||
181 | + | ||
182 | void blk_drain(BlockBackend *blk) | ||
183 | { | ||
184 | BlockDriverState *bs = blk_bs(blk); | ||
185 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
186 | index XXXXXXX..XXXXXXX 100644 | ||
187 | --- a/block/file-posix.c | ||
188 | +++ b/block/file-posix.c | ||
189 | @@ -XXX,XX +XXX,XX @@ typedef struct BDRVRawState { | ||
190 | bool has_write_zeroes:1; | ||
191 | bool use_linux_aio:1; | ||
192 | bool use_linux_io_uring:1; | ||
193 | + int64_t *offset; /* offset of zone append operation */ | ||
194 | int page_cache_inconsistent; /* errno from fdatasync failure */ | ||
195 | bool has_fallocate; | ||
196 | bool needs_alignment; | ||
197 | @@ -XXX,XX +XXX,XX @@ static ssize_t handle_aiocb_rw_vector(RawPosixAIOData *aiocb) | ||
198 | ssize_t len; | ||
199 | |||
200 | len = RETRY_ON_EINTR( | ||
201 | - (aiocb->aio_type & QEMU_AIO_WRITE) ? | ||
202 | + (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) ? | ||
203 | qemu_pwritev(aiocb->aio_fildes, | ||
204 | aiocb->io.iov, | ||
205 | aiocb->io.niov, | ||
206 | @@ -XXX,XX +XXX,XX @@ static ssize_t handle_aiocb_rw_linear(RawPosixAIOData *aiocb, char *buf) | ||
207 | ssize_t len; | ||
208 | |||
209 | while (offset < aiocb->aio_nbytes) { | ||
210 | - if (aiocb->aio_type & QEMU_AIO_WRITE) { | ||
211 | + if (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) { | ||
212 | len = pwrite(aiocb->aio_fildes, | ||
213 | (const char *)buf + offset, | ||
214 | aiocb->aio_nbytes - offset, | ||
215 | @@ -XXX,XX +XXX,XX @@ static int handle_aiocb_rw(void *opaque) | ||
151 | } | 216 | } |
152 | 217 | ||
153 | diff --git a/block/export/fuse.c b/block/export/fuse.c | 218 | nbytes = handle_aiocb_rw_linear(aiocb, buf); |
154 | index XXXXXXX..XXXXXXX 100644 | 219 | - if (!(aiocb->aio_type & QEMU_AIO_WRITE)) { |
155 | --- a/block/export/fuse.c | 220 | + if (!(aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))) { |
156 | +++ b/block/export/fuse.c | 221 | char *p = buf; |
157 | @@ -XXX,XX +XXX,XX @@ static int setup_fuse_export(FuseExport *exp, const char *mountpoint, | 222 | size_t count = aiocb->aio_nbytes, copy; |
158 | 223 | int i; | |
159 | aio_set_fd_handler(exp->common.ctx, | 224 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, |
160 | fuse_session_fd(exp->fuse_session), true, | 225 | if (fd_open(bs) < 0) |
161 | - read_from_fuse_export, NULL, NULL, exp); | 226 | return -EIO; |
162 | + read_from_fuse_export, NULL, NULL, NULL, exp); | 227 | #if defined(CONFIG_BLKZONED) |
163 | exp->fd_handler_set_up = true; | 228 | - if (type & QEMU_AIO_WRITE && bs->wps) { |
164 | 229 | + if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && bs->wps) { | |
165 | return 0; | 230 | qemu_co_mutex_lock(&bs->wps->colock); |
166 | @@ -XXX,XX +XXX,XX @@ static void fuse_export_shutdown(BlockExport *blk_exp) | 231 | + if (type & QEMU_AIO_ZONE_APPEND && bs->bl.zone_size) { |
167 | if (exp->fd_handler_set_up) { | 232 | + int index = offset / bs->bl.zone_size; |
168 | aio_set_fd_handler(exp->common.ctx, | 233 | + offset = bs->wps->wp[index]; |
169 | fuse_session_fd(exp->fuse_session), true, | 234 | + } |
170 | - NULL, NULL, NULL, NULL); | 235 | } |
171 | + NULL, NULL, NULL, NULL, NULL); | 236 | #endif |
172 | exp->fd_handler_set_up = false; | 237 | |
238 | @@ -XXX,XX +XXX,XX @@ out: | ||
239 | { | ||
240 | BlockZoneWps *wps = bs->wps; | ||
241 | if (ret == 0) { | ||
242 | - if (type & QEMU_AIO_WRITE && wps && bs->bl.zone_size) { | ||
243 | + if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) | ||
244 | + && wps && bs->bl.zone_size) { | ||
245 | uint64_t *wp = &wps->wp[offset / bs->bl.zone_size]; | ||
246 | if (!BDRV_ZT_IS_CONV(*wp)) { | ||
247 | + if (type & QEMU_AIO_ZONE_APPEND) { | ||
248 | + *s->offset = *wp; | ||
249 | + } | ||
250 | /* Advance the wp if needed */ | ||
251 | if (offset + bytes > *wp) { | ||
252 | *wp = offset + bytes; | ||
253 | @@ -XXX,XX +XXX,XX @@ out: | ||
254 | } | ||
255 | } | ||
256 | } else { | ||
257 | - if (type & QEMU_AIO_WRITE) { | ||
258 | + if (type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) { | ||
259 | update_zones_wp(bs, s->fd, 0, 1); | ||
173 | } | 260 | } |
174 | } | 261 | } |
262 | |||
263 | - if (type & QEMU_AIO_WRITE && wps) { | ||
264 | + if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) && wps) { | ||
265 | qemu_co_mutex_unlock(&wps->colock); | ||
266 | } | ||
267 | } | ||
268 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
269 | } | ||
270 | #endif | ||
271 | |||
272 | +#if defined(CONFIG_BLKZONED) | ||
273 | +static int coroutine_fn raw_co_zone_append(BlockDriverState *bs, | ||
274 | + int64_t *offset, | ||
275 | + QEMUIOVector *qiov, | ||
276 | + BdrvRequestFlags flags) { | ||
277 | + assert(flags == 0); | ||
278 | + int64_t zone_size_mask = bs->bl.zone_size - 1; | ||
279 | + int64_t iov_len = 0; | ||
280 | + int64_t len = 0; | ||
281 | + BDRVRawState *s = bs->opaque; | ||
282 | + s->offset = offset; | ||
283 | + | ||
284 | + if (*offset & zone_size_mask) { | ||
285 | + error_report("sector offset %" PRId64 " is not aligned to zone size " | ||
286 | + "%" PRId32 "", *offset / 512, bs->bl.zone_size / 512); | ||
287 | + return -EINVAL; | ||
288 | + } | ||
289 | + | ||
290 | + int64_t wg = bs->bl.write_granularity; | ||
291 | + int64_t wg_mask = wg - 1; | ||
292 | + for (int i = 0; i < qiov->niov; i++) { | ||
293 | + iov_len = qiov->iov[i].iov_len; | ||
294 | + if (iov_len & wg_mask) { | ||
295 | + error_report("len of IOVector[%d] %" PRId64 " is not aligned to " | ||
296 | + "block size %" PRId64 "", i, iov_len, wg); | ||
297 | + return -EINVAL; | ||
298 | + } | ||
299 | + len += iov_len; | ||
300 | + } | ||
301 | + | ||
302 | + return raw_co_prw(bs, *offset, len, qiov, QEMU_AIO_ZONE_APPEND); | ||
303 | +} | ||
304 | +#endif | ||
305 | + | ||
306 | static coroutine_fn int | ||
307 | raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes, | ||
308 | bool blkdev) | ||
309 | @@ -XXX,XX +XXX,XX @@ static BlockDriver bdrv_host_device = { | ||
310 | /* zone management operations */ | ||
311 | .bdrv_co_zone_report = raw_co_zone_report, | ||
312 | .bdrv_co_zone_mgmt = raw_co_zone_mgmt, | ||
313 | + .bdrv_co_zone_append = raw_co_zone_append, | ||
314 | #endif | ||
315 | }; | ||
316 | |||
317 | diff --git a/block/io.c b/block/io.c | ||
318 | index XXXXXXX..XXXXXXX 100644 | ||
319 | --- a/block/io.c | ||
320 | +++ b/block/io.c | ||
321 | @@ -XXX,XX +XXX,XX @@ out: | ||
322 | return co.ret; | ||
323 | } | ||
324 | |||
325 | +int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset, | ||
326 | + QEMUIOVector *qiov, | ||
327 | + BdrvRequestFlags flags) | ||
328 | +{ | ||
329 | + int ret; | ||
330 | + BlockDriver *drv = bs->drv; | ||
331 | + CoroutineIOCompletion co = { | ||
332 | + .coroutine = qemu_coroutine_self(), | ||
333 | + }; | ||
334 | + IO_CODE(); | ||
335 | + | ||
336 | + ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL); | ||
337 | + if (ret < 0) { | ||
338 | + return ret; | ||
339 | + } | ||
340 | + | ||
341 | + bdrv_inc_in_flight(bs); | ||
342 | + if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) { | ||
343 | + co.ret = -ENOTSUP; | ||
344 | + goto out; | ||
345 | + } | ||
346 | + co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags); | ||
347 | +out: | ||
348 | + bdrv_dec_in_flight(bs); | ||
349 | + return co.ret; | ||
350 | +} | ||
351 | + | ||
352 | void *qemu_blockalign(BlockDriverState *bs, size_t size) | ||
353 | { | ||
354 | IO_CODE(); | ||
175 | diff --git a/block/io_uring.c b/block/io_uring.c | 355 | diff --git a/block/io_uring.c b/block/io_uring.c |
176 | index XXXXXXX..XXXXXXX 100644 | 356 | index XXXXXXX..XXXXXXX 100644 |
177 | --- a/block/io_uring.c | 357 | --- a/block/io_uring.c |
178 | +++ b/block/io_uring.c | 358 | +++ b/block/io_uring.c |
179 | @@ -XXX,XX +XXX,XX @@ static bool qemu_luring_poll_cb(void *opaque) | 359 | @@ -XXX,XX +XXX,XX @@ static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s, |
180 | { | 360 | io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, |
181 | LuringState *s = opaque; | 361 | luringcb->qiov->niov, offset); |
182 | 362 | break; | |
183 | - if (io_uring_cq_ready(&s->ring)) { | 363 | + case QEMU_AIO_ZONE_APPEND: |
184 | - luring_process_completions_and_submit(s); | 364 | + io_uring_prep_writev(sqes, fd, luringcb->qiov->iov, |
185 | - return true; | 365 | + luringcb->qiov->niov, offset); |
186 | - } | 366 | + break; |
187 | + return io_uring_cq_ready(&s->ring); | 367 | case QEMU_AIO_READ: |
188 | +} | 368 | io_uring_prep_readv(sqes, fd, luringcb->qiov->iov, |
189 | 369 | luringcb->qiov->niov, offset); | |
190 | - return false; | ||
191 | +static void qemu_luring_poll_ready(void *opaque) | ||
192 | +{ | ||
193 | + LuringState *s = opaque; | ||
194 | + | ||
195 | + luring_process_completions_and_submit(s); | ||
196 | } | ||
197 | |||
198 | static void ioq_init(LuringQueue *io_q) | ||
199 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd, | ||
200 | |||
201 | void luring_detach_aio_context(LuringState *s, AioContext *old_context) | ||
202 | { | ||
203 | - aio_set_fd_handler(old_context, s->ring.ring_fd, false, NULL, NULL, NULL, | ||
204 | - s); | ||
205 | + aio_set_fd_handler(old_context, s->ring.ring_fd, false, | ||
206 | + NULL, NULL, NULL, NULL, s); | ||
207 | qemu_bh_delete(s->completion_bh); | ||
208 | s->aio_context = NULL; | ||
209 | } | ||
210 | @@ -XXX,XX +XXX,XX @@ void luring_attach_aio_context(LuringState *s, AioContext *new_context) | ||
211 | s->aio_context = new_context; | ||
212 | s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s); | ||
213 | aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false, | ||
214 | - qemu_luring_completion_cb, NULL, qemu_luring_poll_cb, s); | ||
215 | + qemu_luring_completion_cb, NULL, | ||
216 | + qemu_luring_poll_cb, qemu_luring_poll_ready, s); | ||
217 | } | ||
218 | |||
219 | LuringState *luring_init(Error **errp) | ||
220 | diff --git a/block/iscsi.c b/block/iscsi.c | ||
221 | index XXXXXXX..XXXXXXX 100644 | ||
222 | --- a/block/iscsi.c | ||
223 | +++ b/block/iscsi.c | ||
224 | @@ -XXX,XX +XXX,XX @@ iscsi_set_events(IscsiLun *iscsilun) | ||
225 | false, | ||
226 | (ev & POLLIN) ? iscsi_process_read : NULL, | ||
227 | (ev & POLLOUT) ? iscsi_process_write : NULL, | ||
228 | - NULL, | ||
229 | + NULL, NULL, | ||
230 | iscsilun); | ||
231 | iscsilun->events = ev; | ||
232 | } | ||
233 | @@ -XXX,XX +XXX,XX @@ static void iscsi_detach_aio_context(BlockDriverState *bs) | ||
234 | IscsiLun *iscsilun = bs->opaque; | ||
235 | |||
236 | aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi), | ||
237 | - false, NULL, NULL, NULL, NULL); | ||
238 | + false, NULL, NULL, NULL, NULL, NULL); | ||
239 | iscsilun->events = 0; | ||
240 | |||
241 | if (iscsilun->nop_timer) { | ||
242 | diff --git a/block/linux-aio.c b/block/linux-aio.c | 370 | diff --git a/block/linux-aio.c b/block/linux-aio.c |
243 | index XXXXXXX..XXXXXXX 100644 | 371 | index XXXXXXX..XXXXXXX 100644 |
244 | --- a/block/linux-aio.c | 372 | --- a/block/linux-aio.c |
245 | +++ b/block/linux-aio.c | 373 | +++ b/block/linux-aio.c |
246 | @@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque) | 374 | @@ -XXX,XX +XXX,XX @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, |
247 | LinuxAioState *s = container_of(e, LinuxAioState, e); | 375 | case QEMU_AIO_WRITE: |
248 | struct io_event *events; | 376 | io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); |
249 | 377 | break; | |
250 | - if (!io_getevents_peek(s->ctx, &events)) { | 378 | + case QEMU_AIO_ZONE_APPEND: |
251 | - return false; | 379 | + io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); |
252 | - } | 380 | + break; |
253 | + return io_getevents_peek(s->ctx, &events); | 381 | case QEMU_AIO_READ: |
254 | +} | 382 | io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset); |
255 | + | 383 | break; |
256 | +static void qemu_laio_poll_ready(EventNotifier *opaque) | 384 | diff --git a/block/raw-format.c b/block/raw-format.c |
385 | index XXXXXXX..XXXXXXX 100644 | ||
386 | --- a/block/raw-format.c | ||
387 | +++ b/block/raw-format.c | ||
388 | @@ -XXX,XX +XXX,XX @@ raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op, | ||
389 | return bdrv_co_zone_mgmt(bs->file->bs, op, offset, len); | ||
390 | } | ||
391 | |||
392 | +static int coroutine_fn GRAPH_RDLOCK | ||
393 | +raw_co_zone_append(BlockDriverState *bs,int64_t *offset, QEMUIOVector *qiov, | ||
394 | + BdrvRequestFlags flags) | ||
257 | +{ | 395 | +{ |
258 | + EventNotifier *e = opaque; | 396 | + return bdrv_co_zone_append(bs->file->bs, offset, qiov, flags); |
259 | + LinuxAioState *s = container_of(e, LinuxAioState, e); | 397 | +} |
260 | 398 | + | |
261 | qemu_laio_process_completions_and_submit(s); | 399 | static int64_t coroutine_fn GRAPH_RDLOCK |
262 | - return true; | 400 | raw_co_getlength(BlockDriverState *bs) |
263 | } | ||
264 | |||
265 | static void ioq_init(LaioQueue *io_q) | ||
266 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, | ||
267 | |||
268 | void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context) | ||
269 | { | 401 | { |
270 | - aio_set_event_notifier(old_context, &s->e, false, NULL, NULL); | 402 | @@ -XXX,XX +XXX,XX @@ BlockDriver bdrv_raw = { |
271 | + aio_set_event_notifier(old_context, &s->e, false, NULL, NULL, NULL); | 403 | .bdrv_co_pdiscard = &raw_co_pdiscard, |
272 | qemu_bh_delete(s->completion_bh); | 404 | .bdrv_co_zone_report = &raw_co_zone_report, |
273 | s->aio_context = NULL; | 405 | .bdrv_co_zone_mgmt = &raw_co_zone_mgmt, |
274 | } | 406 | + .bdrv_co_zone_append = &raw_co_zone_append, |
275 | @@ -XXX,XX +XXX,XX @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context) | 407 | .bdrv_co_block_status = &raw_co_block_status, |
276 | s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s); | 408 | .bdrv_co_copy_range_from = &raw_co_copy_range_from, |
277 | aio_set_event_notifier(new_context, &s->e, false, | 409 | .bdrv_co_copy_range_to = &raw_co_copy_range_to, |
278 | qemu_laio_completion_cb, | ||
279 | - qemu_laio_poll_cb); | ||
280 | + qemu_laio_poll_cb, | ||
281 | + qemu_laio_poll_ready); | ||
282 | } | ||
283 | |||
284 | LinuxAioState *laio_init(Error **errp) | ||
285 | diff --git a/block/nfs.c b/block/nfs.c | ||
286 | index XXXXXXX..XXXXXXX 100644 | ||
287 | --- a/block/nfs.c | ||
288 | +++ b/block/nfs.c | ||
289 | @@ -XXX,XX +XXX,XX @@ static void nfs_set_events(NFSClient *client) | ||
290 | false, | ||
291 | (ev & POLLIN) ? nfs_process_read : NULL, | ||
292 | (ev & POLLOUT) ? nfs_process_write : NULL, | ||
293 | - NULL, client); | ||
294 | + NULL, NULL, client); | ||
295 | |||
296 | } | ||
297 | client->events = ev; | ||
298 | @@ -XXX,XX +XXX,XX @@ static void nfs_detach_aio_context(BlockDriverState *bs) | ||
299 | NFSClient *client = bs->opaque; | ||
300 | |||
301 | aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), | ||
302 | - false, NULL, NULL, NULL, NULL); | ||
303 | + false, NULL, NULL, NULL, NULL, NULL); | ||
304 | client->events = 0; | ||
305 | } | ||
306 | |||
307 | @@ -XXX,XX +XXX,XX @@ static void nfs_client_close(NFSClient *client) | ||
308 | if (client->context) { | ||
309 | qemu_mutex_lock(&client->mutex); | ||
310 | aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context), | ||
311 | - false, NULL, NULL, NULL, NULL); | ||
312 | + false, NULL, NULL, NULL, NULL, NULL); | ||
313 | qemu_mutex_unlock(&client->mutex); | ||
314 | if (client->fh) { | ||
315 | nfs_close(client->context, client->fh); | ||
316 | diff --git a/block/nvme.c b/block/nvme.c | ||
317 | index XXXXXXX..XXXXXXX 100644 | ||
318 | --- a/block/nvme.c | ||
319 | +++ b/block/nvme.c | ||
320 | @@ -XXX,XX +XXX,XX @@ out: | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | -static bool nvme_poll_queue(NVMeQueuePair *q) | ||
325 | +static void nvme_poll_queue(NVMeQueuePair *q) | ||
326 | { | ||
327 | - bool progress = false; | ||
328 | - | ||
329 | const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES; | ||
330 | NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset]; | ||
331 | |||
332 | @@ -XXX,XX +XXX,XX @@ static bool nvme_poll_queue(NVMeQueuePair *q) | ||
333 | * cannot race with itself. | ||
334 | */ | ||
335 | if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) { | ||
336 | - return false; | ||
337 | + return; | ||
338 | } | ||
339 | |||
340 | qemu_mutex_lock(&q->lock); | ||
341 | while (nvme_process_completion(q)) { | ||
342 | /* Keep polling */ | ||
343 | - progress = true; | ||
344 | } | ||
345 | qemu_mutex_unlock(&q->lock); | ||
346 | - | ||
347 | - return progress; | ||
348 | } | ||
349 | |||
350 | -static bool nvme_poll_queues(BDRVNVMeState *s) | ||
351 | +static void nvme_poll_queues(BDRVNVMeState *s) | ||
352 | { | ||
353 | - bool progress = false; | ||
354 | int i; | ||
355 | |||
356 | for (i = 0; i < s->queue_count; i++) { | ||
357 | - if (nvme_poll_queue(s->queues[i])) { | ||
358 | - progress = true; | ||
359 | - } | ||
360 | + nvme_poll_queue(s->queues[i]); | ||
361 | } | ||
362 | - return progress; | ||
363 | } | ||
364 | |||
365 | static void nvme_handle_event(EventNotifier *n) | ||
366 | @@ -XXX,XX +XXX,XX @@ static bool nvme_poll_cb(void *opaque) | ||
367 | EventNotifier *e = opaque; | ||
368 | BDRVNVMeState *s = container_of(e, BDRVNVMeState, | ||
369 | irq_notifier[MSIX_SHARED_IRQ_IDX]); | ||
370 | + int i; | ||
371 | |||
372 | - return nvme_poll_queues(s); | ||
373 | + for (i = 0; i < s->queue_count; i++) { | ||
374 | + NVMeQueuePair *q = s->queues[i]; | ||
375 | + const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES; | ||
376 | + NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset]; | ||
377 | + | ||
378 | + /* | ||
379 | + * q->lock isn't needed because nvme_process_completion() only runs in | ||
380 | + * the event loop thread and cannot race with itself. | ||
381 | + */ | ||
382 | + if ((le16_to_cpu(cqe->status) & 0x1) != q->cq_phase) { | ||
383 | + return true; | ||
384 | + } | ||
385 | + } | ||
386 | + return false; | ||
387 | +} | ||
388 | + | ||
389 | +static void nvme_poll_ready(EventNotifier *e) | ||
390 | +{ | ||
391 | + BDRVNVMeState *s = container_of(e, BDRVNVMeState, | ||
392 | + irq_notifier[MSIX_SHARED_IRQ_IDX]); | ||
393 | + | ||
394 | + nvme_poll_queues(s); | ||
395 | } | ||
396 | |||
397 | static int nvme_init(BlockDriverState *bs, const char *device, int namespace, | ||
398 | @@ -XXX,XX +XXX,XX @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, | ||
399 | } | ||
400 | aio_set_event_notifier(bdrv_get_aio_context(bs), | ||
401 | &s->irq_notifier[MSIX_SHARED_IRQ_IDX], | ||
402 | - false, nvme_handle_event, nvme_poll_cb); | ||
403 | + false, nvme_handle_event, nvme_poll_cb, | ||
404 | + nvme_poll_ready); | ||
405 | |||
406 | if (!nvme_identify(bs, namespace, errp)) { | ||
407 | ret = -EIO; | ||
408 | @@ -XXX,XX +XXX,XX @@ static void nvme_close(BlockDriverState *bs) | ||
409 | g_free(s->queues); | ||
410 | aio_set_event_notifier(bdrv_get_aio_context(bs), | ||
411 | &s->irq_notifier[MSIX_SHARED_IRQ_IDX], | ||
412 | - false, NULL, NULL); | ||
413 | + false, NULL, NULL, NULL); | ||
414 | event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]); | ||
415 | qemu_vfio_pci_unmap_bar(s->vfio, 0, s->bar0_wo_map, | ||
416 | 0, sizeof(NvmeBar) + NVME_DOORBELL_SIZE); | ||
417 | @@ -XXX,XX +XXX,XX @@ static void nvme_detach_aio_context(BlockDriverState *bs) | ||
418 | |||
419 | aio_set_event_notifier(bdrv_get_aio_context(bs), | ||
420 | &s->irq_notifier[MSIX_SHARED_IRQ_IDX], | ||
421 | - false, NULL, NULL); | ||
422 | + false, NULL, NULL, NULL); | ||
423 | } | ||
424 | |||
425 | static void nvme_attach_aio_context(BlockDriverState *bs, | ||
426 | @@ -XXX,XX +XXX,XX @@ static void nvme_attach_aio_context(BlockDriverState *bs, | ||
427 | |||
428 | s->aio_context = new_context; | ||
429 | aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX], | ||
430 | - false, nvme_handle_event, nvme_poll_cb); | ||
431 | + false, nvme_handle_event, nvme_poll_cb, | ||
432 | + nvme_poll_ready); | ||
433 | |||
434 | for (unsigned i = 0; i < s->queue_count; i++) { | ||
435 | NVMeQueuePair *q = s->queues[i]; | ||
436 | diff --git a/block/ssh.c b/block/ssh.c | ||
437 | index XXXXXXX..XXXXXXX 100644 | ||
438 | --- a/block/ssh.c | ||
439 | +++ b/block/ssh.c | ||
440 | @@ -XXX,XX +XXX,XX @@ static void restart_coroutine(void *opaque) | ||
441 | AioContext *ctx = bdrv_get_aio_context(bs); | ||
442 | |||
443 | trace_ssh_restart_coroutine(restart->co); | ||
444 | - aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL); | ||
445 | + aio_set_fd_handler(ctx, s->sock, false, NULL, NULL, NULL, NULL, NULL); | ||
446 | |||
447 | aio_co_wake(restart->co); | ||
448 | } | ||
449 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs) | ||
450 | trace_ssh_co_yield(s->sock, rd_handler, wr_handler); | ||
451 | |||
452 | aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, | ||
453 | - false, rd_handler, wr_handler, NULL, &restart); | ||
454 | + false, rd_handler, wr_handler, NULL, NULL, &restart); | ||
455 | qemu_coroutine_yield(); | ||
456 | trace_ssh_co_yield_back(s->sock); | ||
457 | } | ||
458 | diff --git a/block/win32-aio.c b/block/win32-aio.c | ||
459 | index XXXXXXX..XXXXXXX 100644 | ||
460 | --- a/block/win32-aio.c | ||
461 | +++ b/block/win32-aio.c | ||
462 | @@ -XXX,XX +XXX,XX @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile) | ||
463 | void win32_aio_detach_aio_context(QEMUWin32AIOState *aio, | ||
464 | AioContext *old_context) | ||
465 | { | ||
466 | - aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL); | ||
467 | + aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL, NULL); | ||
468 | aio->aio_ctx = NULL; | ||
469 | } | ||
470 | |||
471 | @@ -XXX,XX +XXX,XX @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio, | ||
472 | { | ||
473 | aio->aio_ctx = new_context; | ||
474 | aio_set_event_notifier(new_context, &aio->e, false, | ||
475 | - win32_aio_completion_cb, NULL); | ||
476 | + win32_aio_completion_cb, NULL, NULL); | ||
477 | } | ||
478 | |||
479 | QEMUWin32AIOState *win32_aio_init(void) | ||
480 | diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c | ||
481 | index XXXXXXX..XXXXXXX 100644 | ||
482 | --- a/hw/virtio/virtio.c | ||
483 | +++ b/hw/virtio/virtio.c | ||
484 | @@ -XXX,XX +XXX,XX @@ static bool virtio_queue_host_notifier_aio_poll(void *opaque) | ||
485 | EventNotifier *n = opaque; | ||
486 | VirtQueue *vq = container_of(n, VirtQueue, host_notifier); | ||
487 | |||
488 | - if (!vq->vring.desc || virtio_queue_empty(vq)) { | ||
489 | - return false; | ||
490 | - } | ||
491 | + return vq->vring.desc && !virtio_queue_empty(vq); | ||
492 | +} | ||
493 | |||
494 | - return virtio_queue_notify_aio_vq(vq); | ||
495 | +static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n) | ||
496 | +{ | ||
497 | + VirtQueue *vq = container_of(n, VirtQueue, host_notifier); | ||
498 | + | ||
499 | + virtio_queue_notify_aio_vq(vq); | ||
500 | } | ||
501 | |||
502 | static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n) | ||
503 | @@ -XXX,XX +XXX,XX @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, | ||
504 | vq->handle_aio_output = handle_output; | ||
505 | aio_set_event_notifier(ctx, &vq->host_notifier, true, | ||
506 | virtio_queue_host_notifier_aio_read, | ||
507 | - virtio_queue_host_notifier_aio_poll); | ||
508 | + virtio_queue_host_notifier_aio_poll, | ||
509 | + virtio_queue_host_notifier_aio_poll_ready); | ||
510 | aio_set_event_notifier_poll(ctx, &vq->host_notifier, | ||
511 | virtio_queue_host_notifier_aio_poll_begin, | ||
512 | virtio_queue_host_notifier_aio_poll_end); | ||
513 | } else { | ||
514 | - aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL); | ||
515 | + aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL); | ||
516 | /* Test and clear notifier before after disabling event, | ||
517 | * in case poll callback didn't have time to run. */ | ||
518 | virtio_queue_host_notifier_aio_read(&vq->host_notifier); | ||
519 | diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c | ||
520 | index XXXXXXX..XXXXXXX 100644 | ||
521 | --- a/hw/xen/xen-bus.c | ||
522 | +++ b/hw/xen/xen-bus.c | ||
523 | @@ -XXX,XX +XXX,XX @@ void xen_device_set_event_channel_context(XenDevice *xendev, | ||
524 | |||
525 | if (channel->ctx) | ||
526 | aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, | ||
527 | - NULL, NULL, NULL, NULL); | ||
528 | + NULL, NULL, NULL, NULL, NULL); | ||
529 | |||
530 | channel->ctx = ctx; | ||
531 | aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, | ||
532 | - xen_device_event, NULL, xen_device_poll, channel); | ||
533 | + xen_device_event, NULL, xen_device_poll, NULL, channel); | ||
534 | } | ||
535 | |||
536 | XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev, | ||
537 | @@ -XXX,XX +XXX,XX @@ void xen_device_unbind_event_channel(XenDevice *xendev, | ||
538 | QLIST_REMOVE(channel, list); | ||
539 | |||
540 | aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), true, | ||
541 | - NULL, NULL, NULL, NULL); | ||
542 | + NULL, NULL, NULL, NULL, NULL); | ||
543 | |||
544 | if (xenevtchn_unbind(channel->xeh, channel->local_port) < 0) { | ||
545 | error_setg_errno(errp, errno, "xenevtchn_unbind failed"); | ||
546 | diff --git a/io/channel-command.c b/io/channel-command.c | ||
547 | index XXXXXXX..XXXXXXX 100644 | ||
548 | --- a/io/channel-command.c | ||
549 | +++ b/io/channel-command.c | ||
550 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc, | ||
551 | void *opaque) | ||
552 | { | ||
553 | QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); | ||
554 | - aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque); | ||
555 | - aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque); | ||
556 | + aio_set_fd_handler(ctx, cioc->readfd, false, | ||
557 | + io_read, NULL, NULL, NULL, opaque); | ||
558 | + aio_set_fd_handler(ctx, cioc->writefd, false, | ||
559 | + NULL, io_write, NULL, NULL, opaque); | ||
560 | } | ||
561 | |||
562 | |||
563 | diff --git a/io/channel-file.c b/io/channel-file.c | ||
564 | index XXXXXXX..XXXXXXX 100644 | ||
565 | --- a/io/channel-file.c | ||
566 | +++ b/io/channel-file.c | ||
567 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc, | ||
568 | void *opaque) | ||
569 | { | ||
570 | QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); | ||
571 | - aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque); | ||
572 | + aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, | ||
573 | + NULL, NULL, opaque); | ||
574 | } | ||
575 | |||
576 | static GSource *qio_channel_file_create_watch(QIOChannel *ioc, | ||
577 | diff --git a/io/channel-socket.c b/io/channel-socket.c | ||
578 | index XXXXXXX..XXXXXXX 100644 | ||
579 | --- a/io/channel-socket.c | ||
580 | +++ b/io/channel-socket.c | ||
581 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc, | ||
582 | void *opaque) | ||
583 | { | ||
584 | QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); | ||
585 | - aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque); | ||
586 | + aio_set_fd_handler(ctx, sioc->fd, false, | ||
587 | + io_read, io_write, NULL, NULL, opaque); | ||
588 | } | ||
589 | |||
590 | static GSource *qio_channel_socket_create_watch(QIOChannel *ioc, | ||
591 | diff --git a/migration/rdma.c b/migration/rdma.c | ||
592 | index XXXXXXX..XXXXXXX 100644 | ||
593 | --- a/migration/rdma.c | ||
594 | +++ b/migration/rdma.c | ||
595 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc, | ||
596 | QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc); | ||
597 | if (io_read) { | ||
598 | aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, | ||
599 | - false, io_read, io_write, NULL, opaque); | ||
600 | + false, io_read, io_write, NULL, NULL, opaque); | ||
601 | aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, | ||
602 | - false, io_read, io_write, NULL, opaque); | ||
603 | + false, io_read, io_write, NULL, NULL, opaque); | ||
604 | } else { | ||
605 | aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, | ||
606 | - false, io_read, io_write, NULL, opaque); | ||
607 | + false, io_read, io_write, NULL, NULL, opaque); | ||
608 | aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, | ||
609 | - false, io_read, io_write, NULL, opaque); | ||
610 | + false, io_read, io_write, NULL, NULL, opaque); | ||
611 | } | ||
612 | } | ||
613 | |||
614 | diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c | ||
615 | index XXXXXXX..XXXXXXX 100644 | ||
616 | --- a/tests/unit/test-aio.c | ||
617 | +++ b/tests/unit/test-aio.c | ||
618 | @@ -XXX,XX +XXX,XX @@ static void *test_acquire_thread(void *opaque) | ||
619 | static void set_event_notifier(AioContext *ctx, EventNotifier *notifier, | ||
620 | EventNotifierHandler *handler) | ||
621 | { | ||
622 | - aio_set_event_notifier(ctx, notifier, false, handler, NULL); | ||
623 | + aio_set_event_notifier(ctx, notifier, false, handler, NULL, NULL); | ||
624 | } | ||
625 | |||
626 | static void dummy_notifier_read(EventNotifier *n) | ||
627 | @@ -XXX,XX +XXX,XX @@ static void test_aio_external_client(void) | ||
628 | for (i = 1; i < 3; i++) { | ||
629 | EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; | ||
630 | event_notifier_init(&data.e, false); | ||
631 | - aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL); | ||
632 | + aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL, NULL); | ||
633 | event_notifier_set(&data.e); | ||
634 | for (j = 0; j < i; j++) { | ||
635 | aio_disable_external(ctx); | ||
636 | diff --git a/tests/unit/test-fdmon-epoll.c b/tests/unit/test-fdmon-epoll.c | ||
637 | index XXXXXXX..XXXXXXX 100644 | ||
638 | --- a/tests/unit/test-fdmon-epoll.c | ||
639 | +++ b/tests/unit/test-fdmon-epoll.c | ||
640 | @@ -XXX,XX +XXX,XX @@ static void add_event_notifiers(EventNotifier *notifiers, size_t n) | ||
641 | for (size_t i = 0; i < n; i++) { | ||
642 | event_notifier_init(¬ifiers[i], false); | ||
643 | aio_set_event_notifier(ctx, ¬ifiers[i], false, | ||
644 | - dummy_fd_handler, NULL); | ||
645 | + dummy_fd_handler, NULL, NULL); | ||
646 | } | ||
647 | } | ||
648 | |||
649 | static void remove_event_notifiers(EventNotifier *notifiers, size_t n) | ||
650 | { | ||
651 | for (size_t i = 0; i < n; i++) { | ||
652 | - aio_set_event_notifier(ctx, ¬ifiers[i], false, NULL, NULL); | ||
653 | + aio_set_event_notifier(ctx, ¬ifiers[i], false, NULL, NULL, NULL); | ||
654 | event_notifier_cleanup(¬ifiers[i]); | ||
655 | } | ||
656 | } | ||
657 | diff --git a/util/aio-posix.c b/util/aio-posix.c | ||
658 | index XXXXXXX..XXXXXXX 100644 | ||
659 | --- a/util/aio-posix.c | ||
660 | +++ b/util/aio-posix.c | ||
661 | @@ -XXX,XX +XXX,XX @@ | ||
662 | #include "trace.h" | ||
663 | #include "aio-posix.h" | ||
664 | |||
665 | +/* | ||
666 | + * G_IO_IN and G_IO_OUT are not appropriate revents values for polling, since | ||
667 | + * the handler may not need to access the file descriptor. For example, the | ||
668 | + * handler doesn't need to read from an EventNotifier if it polled a memory | ||
669 | + * location and a read syscall would be slow. Define our own unique revents | ||
670 | + * value to indicate that polling determined this AioHandler is ready. | ||
671 | + */ | ||
672 | +#define REVENTS_POLL_READY 0 | ||
673 | + | ||
674 | /* Stop userspace polling on a handler if it isn't active for some time */ | ||
675 | #define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND) | ||
676 | |||
677 | @@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx, | ||
678 | IOHandler *io_read, | ||
679 | IOHandler *io_write, | ||
680 | AioPollFn *io_poll, | ||
681 | + IOHandler *io_poll_ready, | ||
682 | void *opaque) | ||
683 | { | ||
684 | AioHandler *node; | ||
685 | @@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx, | ||
686 | bool deleted = false; | ||
687 | int poll_disable_change; | ||
688 | |||
689 | + if (io_poll && !io_poll_ready) { | ||
690 | + io_poll = NULL; /* polling only makes sense if there is a handler */ | ||
691 | + } | ||
692 | + | ||
693 | qemu_lockcnt_lock(&ctx->list_lock); | ||
694 | |||
695 | node = find_aio_handler(ctx, fd); | ||
696 | @@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx, | ||
697 | new_node->io_read = io_read; | ||
698 | new_node->io_write = io_write; | ||
699 | new_node->io_poll = io_poll; | ||
700 | + new_node->io_poll_ready = io_poll_ready; | ||
701 | new_node->opaque = opaque; | ||
702 | new_node->is_external = is_external; | ||
703 | |||
704 | @@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx, | ||
705 | EventNotifier *notifier, | ||
706 | bool is_external, | ||
707 | EventNotifierHandler *io_read, | ||
708 | - AioPollFn *io_poll) | ||
709 | + AioPollFn *io_poll, | ||
710 | + EventNotifierHandler *io_poll_ready) | ||
711 | { | ||
712 | aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external, | ||
713 | - (IOHandler *)io_read, NULL, io_poll, notifier); | ||
714 | + (IOHandler *)io_read, NULL, io_poll, | ||
715 | + (IOHandler *)io_poll_ready, notifier); | ||
716 | } | ||
717 | |||
718 | void aio_set_event_notifier_poll(AioContext *ctx, | ||
719 | @@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier_poll(AioContext *ctx, | ||
720 | (IOHandler *)io_poll_end); | ||
721 | } | ||
722 | |||
723 | -static bool poll_set_started(AioContext *ctx, bool started) | ||
724 | +static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list, | ||
725 | + bool started) | ||
726 | { | ||
727 | AioHandler *node; | ||
728 | bool progress = false; | ||
729 | @@ -XXX,XX +XXX,XX @@ static bool poll_set_started(AioContext *ctx, bool started) | ||
730 | } | ||
731 | |||
732 | /* Poll one last time in case ->io_poll_end() raced with the event */ | ||
733 | - if (!started) { | ||
734 | - progress = node->io_poll(node->opaque) || progress; | ||
735 | + if (!started && node->io_poll(node->opaque)) { | ||
736 | + aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY); | ||
737 | + progress = true; | ||
738 | } | ||
739 | } | ||
740 | qemu_lockcnt_dec(&ctx->list_lock); | ||
741 | @@ -XXX,XX +XXX,XX @@ static bool poll_set_started(AioContext *ctx, bool started) | ||
742 | |||
743 | bool aio_prepare(AioContext *ctx) | ||
744 | { | ||
745 | + AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list); | ||
746 | + | ||
747 | /* Poll mode cannot be used with glib's event loop, disable it. */ | ||
748 | - poll_set_started(ctx, false); | ||
749 | + poll_set_started(ctx, &ready_list, false); | ||
750 | + /* TODO what to do with this list? */ | ||
751 | |||
752 | return false; | ||
753 | } | ||
754 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node) | ||
755 | } | ||
756 | QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll); | ||
757 | } | ||
758 | + if (!QLIST_IS_INSERTED(node, node_deleted) && | ||
759 | + revents == 0 && | ||
760 | + aio_node_check(ctx, node->is_external) && | ||
761 | + node->io_poll_ready) { | ||
762 | + node->io_poll_ready(node->opaque); | ||
763 | + | ||
764 | + /* | ||
765 | + * Return early since revents was zero. aio_notify() does not count as | ||
766 | + * progress. | ||
767 | + */ | ||
768 | + return node->opaque != &ctx->notifier; | ||
769 | + } | ||
770 | |||
771 | if (!QLIST_IS_INSERTED(node, node_deleted) && | ||
772 | (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && | ||
773 | @@ -XXX,XX +XXX,XX @@ void aio_dispatch(AioContext *ctx) | ||
774 | } | ||
775 | |||
776 | static bool run_poll_handlers_once(AioContext *ctx, | ||
777 | + AioHandlerList *ready_list, | ||
778 | int64_t now, | ||
779 | int64_t *timeout) | ||
780 | { | ||
781 | @@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers_once(AioContext *ctx, | ||
782 | QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) { | ||
783 | if (aio_node_check(ctx, node->is_external) && | ||
784 | node->io_poll(node->opaque)) { | ||
785 | + aio_add_ready_handler(ready_list, node, REVENTS_POLL_READY); | ||
786 | + | ||
787 | node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS; | ||
788 | |||
789 | /* | ||
790 | @@ -XXX,XX +XXX,XX @@ static bool fdmon_supports_polling(AioContext *ctx) | ||
791 | return ctx->fdmon_ops->need_wait != aio_poll_disabled; | ||
792 | } | ||
793 | |||
794 | -static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) | ||
795 | +static bool remove_idle_poll_handlers(AioContext *ctx, | ||
796 | + AioHandlerList *ready_list, | ||
797 | + int64_t now) | ||
798 | { | ||
799 | AioHandler *node; | ||
800 | AioHandler *tmp; | ||
801 | @@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) | ||
802 | * Nevermind about re-adding the handler in the rare case where | ||
803 | * this causes progress. | ||
804 | */ | ||
805 | - progress = node->io_poll(node->opaque) || progress; | ||
806 | + if (node->io_poll(node->opaque)) { | ||
807 | + aio_add_ready_handler(ready_list, node, | ||
808 | + REVENTS_POLL_READY); | ||
809 | + progress = true; | ||
810 | + } | ||
811 | } | ||
812 | } | ||
813 | } | ||
814 | @@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) | ||
815 | |||
816 | /* run_poll_handlers: | ||
817 | * @ctx: the AioContext | ||
818 | + * @ready_list: the list to place ready handlers on | ||
819 | * @max_ns: maximum time to poll for, in nanoseconds | ||
820 | * | ||
821 | * Polls for a given time. | ||
822 | @@ -XXX,XX +XXX,XX @@ static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now) | ||
823 | * | ||
824 | * Returns: true if progress was made, false otherwise | ||
825 | */ | ||
826 | -static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) | ||
827 | +static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list, | ||
828 | + int64_t max_ns, int64_t *timeout) | ||
829 | { | ||
830 | bool progress; | ||
831 | int64_t start_time, elapsed_time; | ||
832 | @@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) | ||
833 | |||
834 | start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | ||
835 | do { | ||
836 | - progress = run_poll_handlers_once(ctx, start_time, timeout); | ||
837 | + progress = run_poll_handlers_once(ctx, ready_list, | ||
838 | + start_time, timeout); | ||
839 | elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time; | ||
840 | max_ns = qemu_soonest_timeout(*timeout, max_ns); | ||
841 | assert(!(max_ns && progress)); | ||
842 | } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx)); | ||
843 | |||
844 | - if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) { | ||
845 | + if (remove_idle_poll_handlers(ctx, ready_list, | ||
846 | + start_time + elapsed_time)) { | ||
847 | *timeout = 0; | ||
848 | progress = true; | ||
849 | } | ||
850 | @@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) | ||
851 | |||
852 | /* try_poll_mode: | ||
853 | * @ctx: the AioContext | ||
854 | + * @ready_list: list to add handlers that need to be run | ||
855 | * @timeout: timeout for blocking wait, computed by the caller and updated if | ||
856 | * polling succeeds. | ||
857 | * | ||
858 | @@ -XXX,XX +XXX,XX @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout) | ||
859 | * | ||
860 | * Returns: true if progress was made, false otherwise | ||
861 | */ | ||
862 | -static bool try_poll_mode(AioContext *ctx, int64_t *timeout) | ||
863 | +static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list, | ||
864 | + int64_t *timeout) | ||
865 | { | ||
866 | int64_t max_ns; | ||
867 | |||
868 | @@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout) | ||
869 | |||
870 | max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns); | ||
871 | if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) { | ||
872 | - poll_set_started(ctx, true); | ||
873 | + poll_set_started(ctx, ready_list, true); | ||
874 | |||
875 | - if (run_poll_handlers(ctx, max_ns, timeout)) { | ||
876 | + if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) { | ||
877 | return true; | ||
878 | } | ||
879 | } | ||
880 | |||
881 | - if (poll_set_started(ctx, false)) { | ||
882 | + if (poll_set_started(ctx, ready_list, false)) { | ||
883 | *timeout = 0; | ||
884 | return true; | ||
885 | } | ||
886 | @@ -XXX,XX +XXX,XX @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout) | ||
887 | bool aio_poll(AioContext *ctx, bool blocking) | ||
888 | { | ||
889 | AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list); | ||
890 | - int ret = 0; | ||
891 | bool progress; | ||
892 | bool use_notify_me; | ||
893 | int64_t timeout; | ||
894 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
895 | } | ||
896 | |||
897 | timeout = blocking ? aio_compute_timeout(ctx) : 0; | ||
898 | - progress = try_poll_mode(ctx, &timeout); | ||
899 | + progress = try_poll_mode(ctx, &ready_list, &timeout); | ||
900 | assert(!(timeout && progress)); | ||
901 | |||
902 | /* | ||
903 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
904 | * system call---a single round of run_poll_handlers_once suffices. | ||
905 | */ | ||
906 | if (timeout || ctx->fdmon_ops->need_wait(ctx)) { | ||
907 | - ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout); | ||
908 | + ctx->fdmon_ops->wait(ctx, &ready_list, timeout); | ||
909 | } | ||
910 | |||
911 | if (use_notify_me) { | ||
912 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
913 | } | ||
914 | |||
915 | progress |= aio_bh_poll(ctx); | ||
916 | - | ||
917 | - if (ret > 0) { | ||
918 | - progress |= aio_dispatch_ready_handlers(ctx, &ready_list); | ||
919 | - } | ||
920 | + progress |= aio_dispatch_ready_handlers(ctx, &ready_list); | ||
921 | |||
922 | aio_free_deleted_handlers(ctx); | ||
923 | |||
924 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
925 | index XXXXXXX..XXXXXXX 100644 | ||
926 | --- a/util/aio-win32.c | ||
927 | +++ b/util/aio-win32.c | ||
928 | @@ -XXX,XX +XXX,XX @@ void aio_set_fd_handler(AioContext *ctx, | ||
929 | IOHandler *io_read, | ||
930 | IOHandler *io_write, | ||
931 | AioPollFn *io_poll, | ||
932 | + IOHandler *io_poll_ready, | ||
933 | void *opaque) | ||
934 | { | ||
935 | /* fd is a SOCKET in our case */ | ||
936 | @@ -XXX,XX +XXX,XX @@ void aio_set_event_notifier(AioContext *ctx, | ||
937 | EventNotifier *e, | ||
938 | bool is_external, | ||
939 | EventNotifierHandler *io_notify, | ||
940 | - AioPollFn *io_poll) | ||
941 | + AioPollFn *io_poll, | ||
942 | + EventNotifierHandler *io_poll_ready) | ||
943 | { | ||
944 | AioHandler *node; | ||
945 | |||
946 | diff --git a/util/async.c b/util/async.c | ||
947 | index XXXXXXX..XXXXXXX 100644 | ||
948 | --- a/util/async.c | ||
949 | +++ b/util/async.c | ||
950 | @@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source) | ||
951 | g_free(bh); | ||
952 | } | ||
953 | |||
954 | - aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL); | ||
955 | + aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL); | ||
956 | event_notifier_cleanup(&ctx->notifier); | ||
957 | qemu_rec_mutex_destroy(&ctx->lock); | ||
958 | qemu_lockcnt_destroy(&ctx->list_lock); | ||
959 | @@ -XXX,XX +XXX,XX @@ static bool aio_context_notifier_poll(void *opaque) | ||
960 | return qatomic_read(&ctx->notified); | ||
961 | } | ||
962 | |||
963 | +static void aio_context_notifier_poll_ready(EventNotifier *e) | ||
964 | +{ | ||
965 | + /* Do nothing, we just wanted to kick the event loop */ | ||
966 | +} | ||
967 | + | ||
968 | static void co_schedule_bh_cb(void *opaque) | ||
969 | { | ||
970 | AioContext *ctx = opaque; | ||
971 | @@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp) | ||
972 | aio_set_event_notifier(ctx, &ctx->notifier, | ||
973 | false, | ||
974 | aio_context_notifier_cb, | ||
975 | - aio_context_notifier_poll); | ||
976 | + aio_context_notifier_poll, | ||
977 | + aio_context_notifier_poll_ready); | ||
978 | #ifdef CONFIG_LINUX_AIO | ||
979 | ctx->linux_aio = NULL; | ||
980 | #endif | ||
981 | diff --git a/util/main-loop.c b/util/main-loop.c | ||
982 | index XXXXXXX..XXXXXXX 100644 | ||
983 | --- a/util/main-loop.c | ||
984 | +++ b/util/main-loop.c | ||
985 | @@ -XXX,XX +XXX,XX @@ void qemu_set_fd_handler(int fd, | ||
986 | { | ||
987 | iohandler_init(); | ||
988 | aio_set_fd_handler(iohandler_ctx, fd, false, | ||
989 | - fd_read, fd_write, NULL, opaque); | ||
990 | + fd_read, fd_write, NULL, NULL, opaque); | ||
991 | } | ||
992 | |||
993 | void event_notifier_set_handler(EventNotifier *e, | ||
994 | @@ -XXX,XX +XXX,XX @@ void event_notifier_set_handler(EventNotifier *e, | ||
995 | { | ||
996 | iohandler_init(); | ||
997 | aio_set_event_notifier(iohandler_ctx, e, false, | ||
998 | - handler, NULL); | ||
999 | + handler, NULL, NULL); | ||
1000 | } | ||
1001 | diff --git a/util/qemu-coroutine-io.c b/util/qemu-coroutine-io.c | ||
1002 | index XXXXXXX..XXXXXXX 100644 | ||
1003 | --- a/util/qemu-coroutine-io.c | ||
1004 | +++ b/util/qemu-coroutine-io.c | ||
1005 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
1006 | static void fd_coroutine_enter(void *opaque) | ||
1007 | { | ||
1008 | FDYieldUntilData *data = opaque; | ||
1009 | - aio_set_fd_handler(data->ctx, data->fd, false, NULL, NULL, NULL, NULL); | ||
1010 | + aio_set_fd_handler(data->ctx, data->fd, false, | ||
1011 | + NULL, NULL, NULL, NULL, NULL); | ||
1012 | qemu_coroutine_enter(data->co); | ||
1013 | } | ||
1014 | |||
1015 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn yield_until_fd_readable(int fd) | ||
1016 | data.co = qemu_coroutine_self(); | ||
1017 | data.fd = fd; | ||
1018 | aio_set_fd_handler( | ||
1019 | - data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, &data); | ||
1020 | + data.ctx, fd, false, fd_coroutine_enter, NULL, NULL, NULL, &data); | ||
1021 | qemu_coroutine_yield(); | ||
1022 | } | ||
1023 | diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c | ||
1024 | index XXXXXXX..XXXXXXX 100644 | ||
1025 | --- a/util/vhost-user-server.c | ||
1026 | +++ b/util/vhost-user-server.c | ||
1027 | @@ -XXX,XX +XXX,XX @@ set_watch(VuDev *vu_dev, int fd, int vu_evt, | ||
1028 | vu_fd_watch->cb = cb; | ||
1029 | qemu_set_nonblock(fd); | ||
1030 | aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler, | ||
1031 | - NULL, NULL, vu_fd_watch); | ||
1032 | + NULL, NULL, NULL, vu_fd_watch); | ||
1033 | vu_fd_watch->vu_dev = vu_dev; | ||
1034 | vu_fd_watch->pvt = pvt; | ||
1035 | } | ||
1036 | @@ -XXX,XX +XXX,XX @@ static void remove_watch(VuDev *vu_dev, int fd) | ||
1037 | if (!vu_fd_watch) { | ||
1038 | return; | ||
1039 | } | ||
1040 | - aio_set_fd_handler(server->ioc->ctx, fd, true, NULL, NULL, NULL, NULL); | ||
1041 | + aio_set_fd_handler(server->ioc->ctx, fd, true, | ||
1042 | + NULL, NULL, NULL, NULL, NULL); | ||
1043 | |||
1044 | QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next); | ||
1045 | g_free(vu_fd_watch); | ||
1046 | @@ -XXX,XX +XXX,XX @@ void vhost_user_server_stop(VuServer *server) | ||
1047 | |||
1048 | QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) { | ||
1049 | aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true, | ||
1050 | - NULL, NULL, NULL, vu_fd_watch); | ||
1051 | + NULL, NULL, NULL, NULL, vu_fd_watch); | ||
1052 | } | ||
1053 | |||
1054 | qio_channel_shutdown(server->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); | ||
1055 | @@ -XXX,XX +XXX,XX @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx) | ||
1056 | |||
1057 | QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) { | ||
1058 | aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL, | ||
1059 | - NULL, vu_fd_watch); | ||
1060 | + NULL, NULL, vu_fd_watch); | ||
1061 | } | ||
1062 | |||
1063 | aio_co_schedule(ctx, server->co_trip); | ||
1064 | @@ -XXX,XX +XXX,XX @@ void vhost_user_server_detach_aio_context(VuServer *server) | ||
1065 | |||
1066 | QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) { | ||
1067 | aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true, | ||
1068 | - NULL, NULL, NULL, vu_fd_watch); | ||
1069 | + NULL, NULL, NULL, NULL, vu_fd_watch); | ||
1070 | } | ||
1071 | |||
1072 | qio_channel_detach_aio_context(server->ioc); | ||
1073 | -- | 410 | -- |
1074 | 2.34.1 | 411 | 2.40.0 |
1075 | |||
1076 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | The patch tests zone append writes by reporting the zone wp after | ||
4 | the completion of the call. "zap -p" option can print the sector | ||
5 | offset value after completion, which should be the start sector | ||
6 | where the append write begins. | ||
7 | |||
8 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Message-id: 20230427172339.3709-4-faithilikerun@gmail.com | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | qemu-io-cmds.c | 75 ++++++++++++++++++++++++++++++ | ||
14 | tests/qemu-iotests/tests/zoned | 16 +++++++ | ||
15 | tests/qemu-iotests/tests/zoned.out | 16 +++++++ | ||
16 | 3 files changed, 107 insertions(+) | ||
17 | |||
18 | diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/qemu-io-cmds.c | ||
21 | +++ b/qemu-io-cmds.c | ||
22 | @@ -XXX,XX +XXX,XX @@ static const cmdinfo_t zone_reset_cmd = { | ||
23 | .oneline = "reset a zone write pointer in zone block device", | ||
24 | }; | ||
25 | |||
26 | +static int do_aio_zone_append(BlockBackend *blk, QEMUIOVector *qiov, | ||
27 | + int64_t *offset, int flags, int *total) | ||
28 | +{ | ||
29 | + int async_ret = NOT_DONE; | ||
30 | + | ||
31 | + blk_aio_zone_append(blk, offset, qiov, flags, aio_rw_done, &async_ret); | ||
32 | + while (async_ret == NOT_DONE) { | ||
33 | + main_loop_wait(false); | ||
34 | + } | ||
35 | + | ||
36 | + *total = qiov->size; | ||
37 | + return async_ret < 0 ? async_ret : 1; | ||
38 | +} | ||
39 | + | ||
40 | +static int zone_append_f(BlockBackend *blk, int argc, char **argv) | ||
41 | +{ | ||
42 | + int ret; | ||
43 | + bool pflag = false; | ||
44 | + int flags = 0; | ||
45 | + int total = 0; | ||
46 | + int64_t offset; | ||
47 | + char *buf; | ||
48 | + int c, nr_iov; | ||
49 | + int pattern = 0xcd; | ||
50 | + QEMUIOVector qiov; | ||
51 | + | ||
52 | + if (optind > argc - 3) { | ||
53 | + return -EINVAL; | ||
54 | + } | ||
55 | + | ||
56 | + if ((c = getopt(argc, argv, "p")) != -1) { | ||
57 | + pflag = true; | ||
58 | + } | ||
59 | + | ||
60 | + offset = cvtnum(argv[optind]); | ||
61 | + if (offset < 0) { | ||
62 | + print_cvtnum_err(offset, argv[optind]); | ||
63 | + return offset; | ||
64 | + } | ||
65 | + optind++; | ||
66 | + nr_iov = argc - optind; | ||
67 | + buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern, | ||
68 | + flags & BDRV_REQ_REGISTERED_BUF); | ||
69 | + if (buf == NULL) { | ||
70 | + return -EINVAL; | ||
71 | + } | ||
72 | + ret = do_aio_zone_append(blk, &qiov, &offset, flags, &total); | ||
73 | + if (ret < 0) { | ||
74 | + printf("zone append failed: %s\n", strerror(-ret)); | ||
75 | + goto out; | ||
76 | + } | ||
77 | + | ||
78 | + if (pflag) { | ||
79 | + printf("After zap done, the append sector is 0x%" PRIx64 "\n", | ||
80 | + tosector(offset)); | ||
81 | + } | ||
82 | + | ||
83 | +out: | ||
84 | + qemu_io_free(blk, buf, qiov.size, | ||
85 | + flags & BDRV_REQ_REGISTERED_BUF); | ||
86 | + qemu_iovec_destroy(&qiov); | ||
87 | + return ret; | ||
88 | +} | ||
89 | + | ||
90 | +static const cmdinfo_t zone_append_cmd = { | ||
91 | + .name = "zone_append", | ||
92 | + .altname = "zap", | ||
93 | + .cfunc = zone_append_f, | ||
94 | + .argmin = 3, | ||
95 | + .argmax = 4, | ||
96 | + .args = "offset len [len..]", | ||
97 | + .oneline = "append write a number of bytes at a specified offset", | ||
98 | +}; | ||
99 | + | ||
100 | static int truncate_f(BlockBackend *blk, int argc, char **argv); | ||
101 | static const cmdinfo_t truncate_cmd = { | ||
102 | .name = "truncate", | ||
103 | @@ -XXX,XX +XXX,XX @@ static void __attribute((constructor)) init_qemuio_commands(void) | ||
104 | qemuio_add_command(&zone_close_cmd); | ||
105 | qemuio_add_command(&zone_finish_cmd); | ||
106 | qemuio_add_command(&zone_reset_cmd); | ||
107 | + qemuio_add_command(&zone_append_cmd); | ||
108 | qemuio_add_command(&truncate_cmd); | ||
109 | qemuio_add_command(&length_cmd); | ||
110 | qemuio_add_command(&info_cmd); | ||
111 | diff --git a/tests/qemu-iotests/tests/zoned b/tests/qemu-iotests/tests/zoned | ||
112 | index XXXXXXX..XXXXXXX 100755 | ||
113 | --- a/tests/qemu-iotests/tests/zoned | ||
114 | +++ b/tests/qemu-iotests/tests/zoned | ||
115 | @@ -XXX,XX +XXX,XX @@ echo "(5) resetting the second zone" | ||
116 | $QEMU_IO $IMG -c "zrs 268435456 268435456" | ||
117 | echo "After resetting a zone:" | ||
118 | $QEMU_IO $IMG -c "zrp 268435456 1" | ||
119 | +echo | ||
120 | +echo | ||
121 | +echo "(6) append write" # the physical block size of the device is 4096 | ||
122 | +$QEMU_IO $IMG -c "zrp 0 1" | ||
123 | +$QEMU_IO $IMG -c "zap -p 0 0x1000 0x2000" | ||
124 | +echo "After appending the first zone firstly:" | ||
125 | +$QEMU_IO $IMG -c "zrp 0 1" | ||
126 | +$QEMU_IO $IMG -c "zap -p 0 0x1000 0x2000" | ||
127 | +echo "After appending the first zone secondly:" | ||
128 | +$QEMU_IO $IMG -c "zrp 0 1" | ||
129 | +$QEMU_IO $IMG -c "zap -p 268435456 0x1000 0x2000" | ||
130 | +echo "After appending the second zone firstly:" | ||
131 | +$QEMU_IO $IMG -c "zrp 268435456 1" | ||
132 | +$QEMU_IO $IMG -c "zap -p 268435456 0x1000 0x2000" | ||
133 | +echo "After appending the second zone secondly:" | ||
134 | +$QEMU_IO $IMG -c "zrp 268435456 1" | ||
135 | |||
136 | # success, all done | ||
137 | echo "*** done" | ||
138 | diff --git a/tests/qemu-iotests/tests/zoned.out b/tests/qemu-iotests/tests/zoned.out | ||
139 | index XXXXXXX..XXXXXXX 100644 | ||
140 | --- a/tests/qemu-iotests/tests/zoned.out | ||
141 | +++ b/tests/qemu-iotests/tests/zoned.out | ||
142 | @@ -XXX,XX +XXX,XX @@ start: 0x80000, len 0x80000, cap 0x80000, wptr 0x100000, zcond:14, [type: 2] | ||
143 | (5) resetting the second zone | ||
144 | After resetting a zone: | ||
145 | start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80000, zcond:1, [type: 2] | ||
146 | + | ||
147 | + | ||
148 | +(6) append write | ||
149 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x0, zcond:1, [type: 2] | ||
150 | +After zap done, the append sector is 0x0 | ||
151 | +After appending the first zone firstly: | ||
152 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x18, zcond:2, [type: 2] | ||
153 | +After zap done, the append sector is 0x18 | ||
154 | +After appending the first zone secondly: | ||
155 | +start: 0x0, len 0x80000, cap 0x80000, wptr 0x30, zcond:2, [type: 2] | ||
156 | +After zap done, the append sector is 0x80000 | ||
157 | +After appending the second zone firstly: | ||
158 | +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80018, zcond:2, [type: 2] | ||
159 | +After zap done, the append sector is 0x80018 | ||
160 | +After appending the second zone secondly: | ||
161 | +start: 0x80000, len 0x80000, cap 0x80000, wptr 0x80030, zcond:2, [type: 2] | ||
162 | *** done | ||
163 | -- | ||
164 | 2.40.0 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
4 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
5 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Message-id: 20230427172339.3709-5-faithilikerun@gmail.com | ||
7 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | --- | ||
9 | block/file-posix.c | 3 +++ | ||
10 | block/trace-events | 2 ++ | ||
11 | 2 files changed, 5 insertions(+) | ||
12 | |||
13 | diff --git a/block/file-posix.c b/block/file-posix.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/block/file-posix.c | ||
16 | +++ b/block/file-posix.c | ||
17 | @@ -XXX,XX +XXX,XX @@ out: | ||
18 | if (!BDRV_ZT_IS_CONV(*wp)) { | ||
19 | if (type & QEMU_AIO_ZONE_APPEND) { | ||
20 | *s->offset = *wp; | ||
21 | + trace_zbd_zone_append_complete(bs, *s->offset | ||
22 | + >> BDRV_SECTOR_BITS); | ||
23 | } | ||
24 | /* Advance the wp if needed */ | ||
25 | if (offset + bytes > *wp) { | ||
26 | @@ -XXX,XX +XXX,XX @@ static int coroutine_fn raw_co_zone_append(BlockDriverState *bs, | ||
27 | len += iov_len; | ||
28 | } | ||
29 | |||
30 | + trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS); | ||
31 | return raw_co_prw(bs, *offset, len, qiov, QEMU_AIO_ZONE_APPEND); | ||
32 | } | ||
33 | #endif | ||
34 | diff --git a/block/trace-events b/block/trace-events | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/block/trace-events | ||
37 | +++ b/block/trace-events | ||
38 | @@ -XXX,XX +XXX,XX @@ file_hdev_is_sg(int type, int version) "SG device found: type=%d, version=%d" | ||
39 | file_flush_fdatasync_failed(int err) "errno %d" | ||
40 | zbd_zone_report(void *bs, unsigned int nr_zones, int64_t sector) "bs %p report %d zones starting at sector offset 0x%" PRIx64 "" | ||
41 | zbd_zone_mgmt(void *bs, const char *op_name, int64_t sector, int64_t len) "bs %p %s starts at sector offset 0x%" PRIx64 " over a range of 0x%" PRIx64 " sectors" | ||
42 | +zbd_zone_append(void *bs, int64_t sector) "bs %p append at sector offset 0x%" PRIx64 "" | ||
43 | +zbd_zone_append_complete(void *bs, int64_t sector) "bs %p returns append sector 0x%" PRIx64 "" | ||
44 | |||
45 | # ssh.c | ||
46 | sftp_error(const char *op, const char *ssh_err, int ssh_err_code, int sftp_err_code) "%s failed: %s (libssh error code: %d, sftp error code: %d)" | ||
47 | -- | ||
48 | 2.40.0 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Use scripts/update-linux-headers.sh to update headers to 6.3-rc1. | ||
4 | |||
5 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
7 | Reviewed-by: Dmitry Fomichev <dmitry.fomichev@wdc.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | [Reran scripts/update-linux-headers.sh on Linux v6.3. The only change | ||
10 | was the use of __virtioXX types instead of uintXX_t. | ||
11 | --Stefan] | ||
12 | Message-Id: <20230407082528.18841-2-faithilikerun@gmail.com> | ||
13 | --- | ||
14 | include/standard-headers/drm/drm_fourcc.h | 12 +++ | ||
15 | include/standard-headers/linux/ethtool.h | 48 ++++++++- | ||
16 | include/standard-headers/linux/fuse.h | 45 +++++++- | ||
17 | include/standard-headers/linux/pci_regs.h | 1 + | ||
18 | include/standard-headers/linux/vhost_types.h | 2 + | ||
19 | include/standard-headers/linux/virtio_blk.h | 105 +++++++++++++++++++ | ||
20 | linux-headers/asm-arm64/kvm.h | 1 + | ||
21 | linux-headers/asm-x86/kvm.h | 34 +++++- | ||
22 | linux-headers/linux/kvm.h | 9 ++ | ||
23 | linux-headers/linux/vfio.h | 15 +-- | ||
24 | linux-headers/linux/vhost.h | 8 ++ | ||
25 | 11 files changed, 270 insertions(+), 10 deletions(-) | ||
26 | |||
27 | diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h | ||
28 | index XXXXXXX..XXXXXXX 100644 | ||
29 | --- a/include/standard-headers/drm/drm_fourcc.h | ||
30 | +++ b/include/standard-headers/drm/drm_fourcc.h | ||
31 | @@ -XXX,XX +XXX,XX @@ extern "C" { | ||
32 | * | ||
33 | * The authoritative list of format modifier codes is found in | ||
34 | * `include/uapi/drm/drm_fourcc.h` | ||
35 | + * | ||
36 | + * Open Source User Waiver | ||
37 | + * ----------------------- | ||
38 | + * | ||
39 | + * Because this is the authoritative source for pixel formats and modifiers | ||
40 | + * referenced by GL, Vulkan extensions and other standards and hence used both | ||
41 | + * by open source and closed source driver stacks, the usual requirement for an | ||
42 | + * upstream in-kernel or open source userspace user does not apply. | ||
43 | + * | ||
44 | + * To ensure, as much as feasible, compatibility across stacks and avoid | ||
45 | + * confusion with incompatible enumerations stakeholders for all relevant driver | ||
46 | + * stacks should approve additions. | ||
47 | */ | ||
48 | |||
49 | #define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \ | ||
50 | diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h | ||
51 | index XXXXXXX..XXXXXXX 100644 | ||
52 | --- a/include/standard-headers/linux/ethtool.h | ||
53 | +++ b/include/standard-headers/linux/ethtool.h | ||
54 | @@ -XXX,XX +XXX,XX @@ enum ethtool_stringset { | ||
55 | ETH_SS_COUNT | ||
56 | }; | ||
57 | |||
58 | +/** | ||
59 | + * enum ethtool_mac_stats_src - source of ethtool MAC statistics | ||
60 | + * @ETHTOOL_MAC_STATS_SRC_AGGREGATE: | ||
61 | + * if device supports a MAC merge layer, this retrieves the aggregate | ||
62 | + * statistics of the eMAC and pMAC. Otherwise, it retrieves just the | ||
63 | + * statistics of the single (express) MAC. | ||
64 | + * @ETHTOOL_MAC_STATS_SRC_EMAC: | ||
65 | + * if device supports a MM layer, this retrieves the eMAC statistics. | ||
66 | + * Otherwise, it retrieves the statistics of the single (express) MAC. | ||
67 | + * @ETHTOOL_MAC_STATS_SRC_PMAC: | ||
68 | + * if device supports a MM layer, this retrieves the pMAC statistics. | ||
69 | + */ | ||
70 | +enum ethtool_mac_stats_src { | ||
71 | + ETHTOOL_MAC_STATS_SRC_AGGREGATE, | ||
72 | + ETHTOOL_MAC_STATS_SRC_EMAC, | ||
73 | + ETHTOOL_MAC_STATS_SRC_PMAC, | ||
74 | +}; | ||
75 | + | ||
76 | /** | ||
77 | * enum ethtool_module_power_mode_policy - plug-in module power mode policy | ||
78 | * @ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH: Module is always in high power mode. | ||
79 | @@ -XXX,XX +XXX,XX @@ enum ethtool_podl_pse_pw_d_status { | ||
80 | ETHTOOL_PODL_PSE_PW_D_STATUS_ERROR, | ||
81 | }; | ||
82 | |||
83 | +/** | ||
84 | + * enum ethtool_mm_verify_status - status of MAC Merge Verify function | ||
85 | + * @ETHTOOL_MM_VERIFY_STATUS_UNKNOWN: | ||
86 | + * verification status is unknown | ||
87 | + * @ETHTOOL_MM_VERIFY_STATUS_INITIAL: | ||
88 | + * the 802.3 Verify State diagram is in the state INIT_VERIFICATION | ||
89 | + * @ETHTOOL_MM_VERIFY_STATUS_VERIFYING: | ||
90 | + * the Verify State diagram is in the state VERIFICATION_IDLE, | ||
91 | + * SEND_VERIFY or WAIT_FOR_RESPONSE | ||
92 | + * @ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED: | ||
93 | + * indicates that the Verify State diagram is in the state VERIFIED | ||
94 | + * @ETHTOOL_MM_VERIFY_STATUS_FAILED: | ||
95 | + * the Verify State diagram is in the state VERIFY_FAIL | ||
96 | + * @ETHTOOL_MM_VERIFY_STATUS_DISABLED: | ||
97 | + * verification of preemption operation is disabled | ||
98 | + */ | ||
99 | +enum ethtool_mm_verify_status { | ||
100 | + ETHTOOL_MM_VERIFY_STATUS_UNKNOWN, | ||
101 | + ETHTOOL_MM_VERIFY_STATUS_INITIAL, | ||
102 | + ETHTOOL_MM_VERIFY_STATUS_VERIFYING, | ||
103 | + ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED, | ||
104 | + ETHTOOL_MM_VERIFY_STATUS_FAILED, | ||
105 | + ETHTOOL_MM_VERIFY_STATUS_DISABLED, | ||
106 | +}; | ||
107 | + | ||
108 | /** | ||
109 | * struct ethtool_gstrings - string set for data tagging | ||
110 | * @cmd: Command number = %ETHTOOL_GSTRINGS | ||
111 | @@ -XXX,XX +XXX,XX @@ struct ethtool_rxnfc { | ||
112 | uint32_t rule_cnt; | ||
113 | uint32_t rss_context; | ||
114 | }; | ||
115 | - uint32_t rule_locs[0]; | ||
116 | + uint32_t rule_locs[]; | ||
117 | }; | ||
118 | |||
119 | |||
120 | @@ -XXX,XX +XXX,XX @@ enum ethtool_link_mode_bit_indices { | ||
121 | ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT = 96, | ||
122 | ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT = 97, | ||
123 | ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT = 98, | ||
124 | + ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, | ||
125 | + ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, | ||
126 | + ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, | ||
127 | |||
128 | /* must be last entry */ | ||
129 | __ETHTOOL_LINK_MODE_MASK_NBITS | ||
130 | diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h | ||
131 | index XXXXXXX..XXXXXXX 100644 | ||
132 | --- a/include/standard-headers/linux/fuse.h | ||
133 | +++ b/include/standard-headers/linux/fuse.h | ||
134 | @@ -XXX,XX +XXX,XX @@ | ||
135 | * 7.38 | ||
136 | * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry | ||
137 | * - add FOPEN_PARALLEL_DIRECT_WRITES | ||
138 | + * - add total_extlen to fuse_in_header | ||
139 | + * - add FUSE_MAX_NR_SECCTX | ||
140 | + * - add extension header | ||
141 | + * - add FUSE_EXT_GROUPS | ||
142 | + * - add FUSE_CREATE_SUPP_GROUP | ||
143 | */ | ||
144 | |||
145 | #ifndef _LINUX_FUSE_H | ||
146 | @@ -XXX,XX +XXX,XX @@ struct fuse_file_lock { | ||
147 | * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and | ||
148 | * mknod | ||
149 | * FUSE_HAS_INODE_DAX: use per inode DAX | ||
150 | + * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, | ||
151 | + * symlink and mknod (single group that matches parent) | ||
152 | */ | ||
153 | #define FUSE_ASYNC_READ (1 << 0) | ||
154 | #define FUSE_POSIX_LOCKS (1 << 1) | ||
155 | @@ -XXX,XX +XXX,XX @@ struct fuse_file_lock { | ||
156 | /* bits 32..63 get shifted down 32 bits into the flags2 field */ | ||
157 | #define FUSE_SECURITY_CTX (1ULL << 32) | ||
158 | #define FUSE_HAS_INODE_DAX (1ULL << 33) | ||
159 | +#define FUSE_CREATE_SUPP_GROUP (1ULL << 34) | ||
160 | |||
161 | /** | ||
162 | * CUSE INIT request/reply flags | ||
163 | @@ -XXX,XX +XXX,XX @@ struct fuse_file_lock { | ||
164 | */ | ||
165 | #define FUSE_EXPIRE_ONLY (1 << 0) | ||
166 | |||
167 | +/** | ||
168 | + * extension type | ||
169 | + * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx | ||
170 | + * FUSE_EXT_GROUPS: &fuse_supp_groups extension | ||
171 | + */ | ||
172 | +enum fuse_ext_type { | ||
173 | + /* Types 0..31 are reserved for fuse_secctx_header */ | ||
174 | + FUSE_MAX_NR_SECCTX = 31, | ||
175 | + FUSE_EXT_GROUPS = 32, | ||
176 | +}; | ||
177 | + | ||
178 | enum fuse_opcode { | ||
179 | FUSE_LOOKUP = 1, | ||
180 | FUSE_FORGET = 2, /* no reply */ | ||
181 | @@ -XXX,XX +XXX,XX @@ struct fuse_in_header { | ||
182 | uint32_t uid; | ||
183 | uint32_t gid; | ||
184 | uint32_t pid; | ||
185 | - uint32_t padding; | ||
186 | + uint16_t total_extlen; /* length of extensions in 8byte units */ | ||
187 | + uint16_t padding; | ||
188 | }; | ||
189 | |||
190 | struct fuse_out_header { | ||
191 | @@ -XXX,XX +XXX,XX @@ struct fuse_secctx_header { | ||
192 | uint32_t nr_secctx; | ||
193 | }; | ||
194 | |||
195 | +/** | ||
196 | + * struct fuse_ext_header - extension header | ||
197 | + * @size: total size of this extension including this header | ||
198 | + * @type: type of extension | ||
199 | + * | ||
200 | + * This is made compatible with fuse_secctx_header by using type values > | ||
201 | + * FUSE_MAX_NR_SECCTX | ||
202 | + */ | ||
203 | +struct fuse_ext_header { | ||
204 | + uint32_t size; | ||
205 | + uint32_t type; | ||
206 | +}; | ||
207 | + | ||
208 | +/** | ||
209 | + * struct fuse_supp_groups - Supplementary group extension | ||
210 | + * @nr_groups: number of supplementary groups | ||
211 | + * @groups: flexible array of group IDs | ||
212 | + */ | ||
213 | +struct fuse_supp_groups { | ||
214 | + uint32_t nr_groups; | ||
215 | + uint32_t groups[]; | ||
216 | +}; | ||
217 | + | ||
218 | #endif /* _LINUX_FUSE_H */ | ||
219 | diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h | ||
220 | index XXXXXXX..XXXXXXX 100644 | ||
221 | --- a/include/standard-headers/linux/pci_regs.h | ||
222 | +++ b/include/standard-headers/linux/pci_regs.h | ||
223 | @@ -XXX,XX +XXX,XX @@ | ||
224 | #define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */ | ||
225 | #define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */ | ||
226 | #define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */ | ||
227 | +#define PCI_EXP_LNKSTA2_FLIT 0x0400 /* Flit Mode Status */ | ||
228 | #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */ | ||
229 | #define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */ | ||
230 | #define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */ | ||
231 | diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h | ||
232 | index XXXXXXX..XXXXXXX 100644 | ||
233 | --- a/include/standard-headers/linux/vhost_types.h | ||
234 | +++ b/include/standard-headers/linux/vhost_types.h | ||
235 | @@ -XXX,XX +XXX,XX @@ struct vhost_vdpa_iova_range { | ||
236 | #define VHOST_BACKEND_F_IOTLB_ASID 0x3 | ||
237 | /* Device can be suspended */ | ||
238 | #define VHOST_BACKEND_F_SUSPEND 0x4 | ||
239 | +/* Device can be resumed */ | ||
240 | +#define VHOST_BACKEND_F_RESUME 0x5 | ||
241 | |||
242 | #endif | ||
243 | diff --git a/include/standard-headers/linux/virtio_blk.h b/include/standard-headers/linux/virtio_blk.h | ||
244 | index XXXXXXX..XXXXXXX 100644 | ||
245 | --- a/include/standard-headers/linux/virtio_blk.h | ||
246 | +++ b/include/standard-headers/linux/virtio_blk.h | ||
247 | @@ -XXX,XX +XXX,XX @@ | ||
248 | #define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */ | ||
249 | #define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */ | ||
250 | #define VIRTIO_BLK_F_SECURE_ERASE 16 /* Secure Erase is supported */ | ||
251 | +#define VIRTIO_BLK_F_ZONED 17 /* Zoned block device */ | ||
252 | |||
253 | /* Legacy feature bits */ | ||
254 | #ifndef VIRTIO_BLK_NO_LEGACY | ||
255 | @@ -XXX,XX +XXX,XX @@ struct virtio_blk_config { | ||
256 | /* Secure erase commands must be aligned to this number of sectors. */ | ||
257 | __virtio32 secure_erase_sector_alignment; | ||
258 | |||
259 | + /* Zoned block device characteristics (if VIRTIO_BLK_F_ZONED) */ | ||
260 | + struct virtio_blk_zoned_characteristics { | ||
261 | + __virtio32 zone_sectors; | ||
262 | + __virtio32 max_open_zones; | ||
263 | + __virtio32 max_active_zones; | ||
264 | + __virtio32 max_append_sectors; | ||
265 | + __virtio32 write_granularity; | ||
266 | + uint8_t model; | ||
267 | + uint8_t unused2[3]; | ||
268 | + } zoned; | ||
269 | } QEMU_PACKED; | ||
270 | |||
271 | /* | ||
272 | @@ -XXX,XX +XXX,XX @@ struct virtio_blk_config { | ||
273 | /* Secure erase command */ | ||
274 | #define VIRTIO_BLK_T_SECURE_ERASE 14 | ||
275 | |||
276 | +/* Zone append command */ | ||
277 | +#define VIRTIO_BLK_T_ZONE_APPEND 15 | ||
278 | + | ||
279 | +/* Report zones command */ | ||
280 | +#define VIRTIO_BLK_T_ZONE_REPORT 16 | ||
281 | + | ||
282 | +/* Open zone command */ | ||
283 | +#define VIRTIO_BLK_T_ZONE_OPEN 18 | ||
284 | + | ||
285 | +/* Close zone command */ | ||
286 | +#define VIRTIO_BLK_T_ZONE_CLOSE 20 | ||
287 | + | ||
288 | +/* Finish zone command */ | ||
289 | +#define VIRTIO_BLK_T_ZONE_FINISH 22 | ||
290 | + | ||
291 | +/* Reset zone command */ | ||
292 | +#define VIRTIO_BLK_T_ZONE_RESET 24 | ||
293 | + | ||
294 | +/* Reset All zones command */ | ||
295 | +#define VIRTIO_BLK_T_ZONE_RESET_ALL 26 | ||
296 | + | ||
297 | #ifndef VIRTIO_BLK_NO_LEGACY | ||
298 | /* Barrier before this op. */ | ||
299 | #define VIRTIO_BLK_T_BARRIER 0x80000000 | ||
300 | @@ -XXX,XX +XXX,XX @@ struct virtio_blk_outhdr { | ||
301 | __virtio64 sector; | ||
302 | }; | ||
303 | |||
304 | +/* | ||
305 | + * Supported zoned device models. | ||
306 | + */ | ||
307 | + | ||
308 | +/* Regular block device */ | ||
309 | +#define VIRTIO_BLK_Z_NONE 0 | ||
310 | +/* Host-managed zoned device */ | ||
311 | +#define VIRTIO_BLK_Z_HM 1 | ||
312 | +/* Host-aware zoned device */ | ||
313 | +#define VIRTIO_BLK_Z_HA 2 | ||
314 | + | ||
315 | +/* | ||
316 | + * Zone descriptor. A part of VIRTIO_BLK_T_ZONE_REPORT command reply. | ||
317 | + */ | ||
318 | +struct virtio_blk_zone_descriptor { | ||
319 | + /* Zone capacity */ | ||
320 | + __virtio64 z_cap; | ||
321 | + /* The starting sector of the zone */ | ||
322 | + __virtio64 z_start; | ||
323 | + /* Zone write pointer position in sectors */ | ||
324 | + __virtio64 z_wp; | ||
325 | + /* Zone type */ | ||
326 | + uint8_t z_type; | ||
327 | + /* Zone state */ | ||
328 | + uint8_t z_state; | ||
329 | + uint8_t reserved[38]; | ||
330 | +}; | ||
331 | + | ||
332 | +struct virtio_blk_zone_report { | ||
333 | + __virtio64 nr_zones; | ||
334 | + uint8_t reserved[56]; | ||
335 | + struct virtio_blk_zone_descriptor zones[]; | ||
336 | +}; | ||
337 | + | ||
338 | +/* | ||
339 | + * Supported zone types. | ||
340 | + */ | ||
341 | + | ||
342 | +/* Conventional zone */ | ||
343 | +#define VIRTIO_BLK_ZT_CONV 1 | ||
344 | +/* Sequential Write Required zone */ | ||
345 | +#define VIRTIO_BLK_ZT_SWR 2 | ||
346 | +/* Sequential Write Preferred zone */ | ||
347 | +#define VIRTIO_BLK_ZT_SWP 3 | ||
348 | + | ||
349 | +/* | ||
350 | + * Zone states that are available for zones of all types. | ||
351 | + */ | ||
352 | + | ||
353 | +/* Not a write pointer (conventional zones only) */ | ||
354 | +#define VIRTIO_BLK_ZS_NOT_WP 0 | ||
355 | +/* Empty */ | ||
356 | +#define VIRTIO_BLK_ZS_EMPTY 1 | ||
357 | +/* Implicitly Open */ | ||
358 | +#define VIRTIO_BLK_ZS_IOPEN 2 | ||
359 | +/* Explicitly Open */ | ||
360 | +#define VIRTIO_BLK_ZS_EOPEN 3 | ||
361 | +/* Closed */ | ||
362 | +#define VIRTIO_BLK_ZS_CLOSED 4 | ||
363 | +/* Read-Only */ | ||
364 | +#define VIRTIO_BLK_ZS_RDONLY 13 | ||
365 | +/* Full */ | ||
366 | +#define VIRTIO_BLK_ZS_FULL 14 | ||
367 | +/* Offline */ | ||
368 | +#define VIRTIO_BLK_ZS_OFFLINE 15 | ||
369 | + | ||
370 | /* Unmap this range (only valid for write zeroes command) */ | ||
371 | #define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001 | ||
372 | |||
373 | @@ -XXX,XX +XXX,XX @@ struct virtio_scsi_inhdr { | ||
374 | #define VIRTIO_BLK_S_OK 0 | ||
375 | #define VIRTIO_BLK_S_IOERR 1 | ||
376 | #define VIRTIO_BLK_S_UNSUPP 2 | ||
377 | + | ||
378 | +/* Error codes that are specific to zoned block devices */ | ||
379 | +#define VIRTIO_BLK_S_ZONE_INVALID_CMD 3 | ||
380 | +#define VIRTIO_BLK_S_ZONE_UNALIGNED_WP 4 | ||
381 | +#define VIRTIO_BLK_S_ZONE_OPEN_RESOURCE 5 | ||
382 | +#define VIRTIO_BLK_S_ZONE_ACTIVE_RESOURCE 6 | ||
383 | + | ||
384 | #endif /* _LINUX_VIRTIO_BLK_H */ | ||
385 | diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h | ||
386 | index XXXXXXX..XXXXXXX 100644 | ||
387 | --- a/linux-headers/asm-arm64/kvm.h | ||
388 | +++ b/linux-headers/asm-arm64/kvm.h | ||
389 | @@ -XXX,XX +XXX,XX @@ struct kvm_regs { | ||
390 | #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ | ||
391 | #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ | ||
392 | #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ | ||
393 | +#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ | ||
394 | |||
395 | struct kvm_vcpu_init { | ||
396 | __u32 target; | ||
397 | diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h | ||
398 | index XXXXXXX..XXXXXXX 100644 | ||
399 | --- a/linux-headers/asm-x86/kvm.h | ||
400 | +++ b/linux-headers/asm-x86/kvm.h | ||
401 | @@ -XXX,XX +XXX,XX @@ | ||
402 | |||
403 | #include <linux/types.h> | ||
404 | #include <linux/ioctl.h> | ||
405 | +#include <linux/stddef.h> | ||
406 | |||
407 | #define KVM_PIO_PAGE_OFFSET 1 | ||
408 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 | ||
409 | @@ -XXX,XX +XXX,XX @@ struct kvm_nested_state { | ||
410 | * KVM_{GET,PUT}_NESTED_STATE ioctl values. | ||
411 | */ | ||
412 | union { | ||
413 | - struct kvm_vmx_nested_state_data vmx[0]; | ||
414 | - struct kvm_svm_nested_state_data svm[0]; | ||
415 | + __DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx); | ||
416 | + __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm); | ||
417 | } data; | ||
418 | }; | ||
419 | |||
420 | @@ -XXX,XX +XXX,XX @@ struct kvm_pmu_event_filter { | ||
421 | #define KVM_PMU_EVENT_ALLOW 0 | ||
422 | #define KVM_PMU_EVENT_DENY 1 | ||
423 | |||
424 | +#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0) | ||
425 | +#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS) | ||
426 | + | ||
427 | +/* | ||
428 | + * Masked event layout. | ||
429 | + * Bits Description | ||
430 | + * ---- ----------- | ||
431 | + * 7:0 event select (low bits) | ||
432 | + * 15:8 umask match | ||
433 | + * 31:16 unused | ||
434 | + * 35:32 event select (high bits) | ||
435 | + * 36:54 unused | ||
436 | + * 55 exclude bit | ||
437 | + * 63:56 umask mask | ||
438 | + */ | ||
439 | + | ||
440 | +#define KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, exclude) \ | ||
441 | + (((event_select) & 0xFFULL) | (((event_select) & 0XF00ULL) << 24) | \ | ||
442 | + (((mask) & 0xFFULL) << 56) | \ | ||
443 | + (((match) & 0xFFULL) << 8) | \ | ||
444 | + ((__u64)(!!(exclude)) << 55)) | ||
445 | + | ||
446 | +#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \ | ||
447 | + (GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32)) | ||
448 | +#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (GENMASK_ULL(63, 56)) | ||
449 | +#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (GENMASK_ULL(15, 8)) | ||
450 | +#define KVM_PMU_MASKED_ENTRY_EXCLUDE (BIT_ULL(55)) | ||
451 | +#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56) | ||
452 | + | ||
453 | /* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ | ||
454 | #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ | ||
455 | #define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ | ||
456 | diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h | ||
457 | index XXXXXXX..XXXXXXX 100644 | ||
458 | --- a/linux-headers/linux/kvm.h | ||
459 | +++ b/linux-headers/linux/kvm.h | ||
460 | @@ -XXX,XX +XXX,XX @@ struct kvm_s390_mem_op { | ||
461 | struct { | ||
462 | __u8 ar; /* the access register number */ | ||
463 | __u8 key; /* access key, ignored if flag unset */ | ||
464 | + __u8 pad1[6]; /* ignored */ | ||
465 | + __u64 old_addr; /* ignored if cmpxchg flag unset */ | ||
466 | }; | ||
467 | __u32 sida_offset; /* offset into the sida */ | ||
468 | __u8 reserved[32]; /* ignored */ | ||
469 | @@ -XXX,XX +XXX,XX @@ struct kvm_s390_mem_op { | ||
470 | #define KVM_S390_MEMOP_SIDA_WRITE 3 | ||
471 | #define KVM_S390_MEMOP_ABSOLUTE_READ 4 | ||
472 | #define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 | ||
473 | +#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6 | ||
474 | + | ||
475 | /* flags for kvm_s390_mem_op->flags */ | ||
476 | #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) | ||
477 | #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) | ||
478 | #define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) | ||
479 | |||
480 | +/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */ | ||
481 | +#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0) | ||
482 | +#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1) | ||
483 | + | ||
484 | /* for KVM_INTERRUPT */ | ||
485 | struct kvm_interrupt { | ||
486 | /* in */ | ||
487 | @@ -XXX,XX +XXX,XX @@ struct kvm_ppc_resize_hpt { | ||
488 | #define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223 | ||
489 | #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224 | ||
490 | #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225 | ||
491 | +#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226 | ||
492 | |||
493 | #ifdef KVM_CAP_IRQ_ROUTING | ||
494 | |||
495 | diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h | ||
496 | index XXXXXXX..XXXXXXX 100644 | ||
497 | --- a/linux-headers/linux/vfio.h | ||
498 | +++ b/linux-headers/linux/vfio.h | ||
499 | @@ -XXX,XX +XXX,XX @@ | ||
500 | /* Supports VFIO_DMA_UNMAP_FLAG_ALL */ | ||
501 | #define VFIO_UNMAP_ALL 9 | ||
502 | |||
503 | -/* Supports the vaddr flag for DMA map and unmap */ | ||
504 | +/* | ||
505 | + * Supports the vaddr flag for DMA map and unmap. Not supported for mediated | ||
506 | + * devices, so this capability is subject to change as groups are added or | ||
507 | + * removed. | ||
508 | + */ | ||
509 | #define VFIO_UPDATE_VADDR 10 | ||
510 | |||
511 | /* | ||
512 | @@ -XXX,XX +XXX,XX @@ struct vfio_iommu_type1_info_dma_avail { | ||
513 | * Map process virtual addresses to IO virtual addresses using the | ||
514 | * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required. | ||
515 | * | ||
516 | - * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and | ||
517 | - * unblock translation of host virtual addresses in the iova range. The vaddr | ||
518 | + * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr | ||
519 | * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR. To | ||
520 | * maintain memory consistency within the user application, the updated vaddr | ||
521 | * must address the same memory object as originally mapped. Failure to do so | ||
522 | @@ -XXX,XX +XXX,XX @@ struct vfio_bitmap { | ||
523 | * must be 0. This cannot be combined with the get-dirty-bitmap flag. | ||
524 | * | ||
525 | * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host | ||
526 | - * virtual addresses in the iova range. Tasks that attempt to translate an | ||
527 | - * iova's vaddr will block. DMA to already-mapped pages continues. This | ||
528 | - * cannot be combined with the get-dirty-bitmap flag. | ||
529 | + * virtual addresses in the iova range. DMA to already-mapped pages continues. | ||
530 | + * Groups may not be added to the container while any addresses are invalid. | ||
531 | + * This cannot be combined with the get-dirty-bitmap flag. | ||
532 | */ | ||
533 | struct vfio_iommu_type1_dma_unmap { | ||
534 | __u32 argsz; | ||
535 | diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h | ||
536 | index XXXXXXX..XXXXXXX 100644 | ||
537 | --- a/linux-headers/linux/vhost.h | ||
538 | +++ b/linux-headers/linux/vhost.h | ||
539 | @@ -XXX,XX +XXX,XX @@ | ||
540 | */ | ||
541 | #define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D) | ||
542 | |||
543 | +/* Resume a device so it can resume processing virtqueue requests | ||
544 | + * | ||
545 | + * After the return of this ioctl the device will have restored all the | ||
546 | + * necessary states and it is fully operational to continue processing the | ||
547 | + * virtqueue descriptors. | ||
548 | + */ | ||
549 | +#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) | ||
550 | + | ||
551 | #endif | ||
552 | -- | ||
553 | 2.40.0 | diff view generated by jsdifflib |
1 | The return value of virtio_blk_handle_vq() is no longer used. Get rid of | 1 | From: Sam Li <faithilikerun@gmail.com> |
---|---|---|---|
2 | it. This is a step towards unifying the dataplane and non-dataplane | ||
3 | virtqueue handler functions. | ||
4 | 2 | ||
5 | Prepare virtio_blk_handle_output() to be used by both dataplane and | 3 | This patch extends virtio-blk emulation to handle zoned device commands |
6 | non-dataplane by making the condition for starting ioeventfd more | 4 | by calling the new block layer APIs to perform zoned device I/O on |
7 | specific. This way it won't trigger when dataplane has already been | 5 | behalf of the guest. It supports Report Zone, four zone oparations (open, |
8 | started. | 6 | close, finish, reset), and Append Zone. |
9 | 7 | ||
8 | The VIRTIO_BLK_F_ZONED feature bit will only be set if the host does | ||
9 | support zoned block devices. Regular block devices(conventional zones) | ||
10 | will not be set. | ||
11 | |||
12 | The guest os can use blktests, fio to test those commands on zoned devices. | ||
13 | Furthermore, using zonefs to test zone append write is also supported. | ||
14 | |||
15 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | Reviewed-by: Stefano Garzarella <sgarzare@redhat.com> | 17 | Message-Id: <20230407082528.18841-3-faithilikerun@gmail.com> |
12 | Message-id: 20211207132336.36627-4-stefanha@redhat.com | ||
13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
14 | --- | 18 | --- |
15 | include/hw/virtio/virtio-blk.h | 2 +- | 19 | hw/block/virtio-blk-common.c | 2 + |
16 | hw/block/virtio-blk.c | 14 +++----------- | 20 | hw/block/virtio-blk.c | 389 +++++++++++++++++++++++++++++++++++ |
17 | 2 files changed, 4 insertions(+), 12 deletions(-) | 21 | hw/virtio/virtio-qmp.c | 2 + |
22 | 3 files changed, 393 insertions(+) | ||
18 | 23 | ||
19 | diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h | 24 | diff --git a/hw/block/virtio-blk-common.c b/hw/block/virtio-blk-common.c |
20 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/include/hw/virtio/virtio-blk.h | 26 | --- a/hw/block/virtio-blk-common.c |
22 | +++ b/include/hw/virtio/virtio-blk.h | 27 | +++ b/hw/block/virtio-blk-common.c |
23 | @@ -XXX,XX +XXX,XX @@ typedef struct MultiReqBuffer { | 28 | @@ -XXX,XX +XXX,XX @@ static const VirtIOFeature feature_sizes[] = { |
24 | bool is_write; | 29 | .end = endof(struct virtio_blk_config, discard_sector_alignment)}, |
25 | } MultiReqBuffer; | 30 | {.flags = 1ULL << VIRTIO_BLK_F_WRITE_ZEROES, |
26 | 31 | .end = endof(struct virtio_blk_config, write_zeroes_may_unmap)}, | |
27 | -bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); | 32 | + {.flags = 1ULL << VIRTIO_BLK_F_ZONED, |
28 | +void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq); | 33 | + .end = endof(struct virtio_blk_config, zoned)}, |
29 | void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh); | 34 | {} |
30 | 35 | }; | |
31 | #endif | 36 | |
32 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | 37 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c |
33 | index XXXXXXX..XXXXXXX 100644 | 38 | index XXXXXXX..XXXXXXX 100644 |
34 | --- a/hw/block/virtio-blk.c | 39 | --- a/hw/block/virtio-blk.c |
35 | +++ b/hw/block/virtio-blk.c | 40 | +++ b/hw/block/virtio-blk.c |
41 | @@ -XXX,XX +XXX,XX @@ | ||
42 | #include "qemu/module.h" | ||
43 | #include "qemu/error-report.h" | ||
44 | #include "qemu/main-loop.h" | ||
45 | +#include "block/block_int.h" | ||
46 | #include "trace.h" | ||
47 | #include "hw/block/block.h" | ||
48 | #include "hw/qdev-properties.h" | ||
49 | @@ -XXX,XX +XXX,XX @@ err: | ||
50 | return err_status; | ||
51 | } | ||
52 | |||
53 | +typedef struct ZoneCmdData { | ||
54 | + VirtIOBlockReq *req; | ||
55 | + struct iovec *in_iov; | ||
56 | + unsigned in_num; | ||
57 | + union { | ||
58 | + struct { | ||
59 | + unsigned int nr_zones; | ||
60 | + BlockZoneDescriptor *zones; | ||
61 | + } zone_report_data; | ||
62 | + struct { | ||
63 | + int64_t offset; | ||
64 | + } zone_append_data; | ||
65 | + }; | ||
66 | +} ZoneCmdData; | ||
67 | + | ||
68 | +/* | ||
69 | + * check zoned_request: error checking before issuing requests. If all checks | ||
70 | + * passed, return true. | ||
71 | + * append: true if only zone append requests issued. | ||
72 | + */ | ||
73 | +static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len, | ||
74 | + bool append, uint8_t *status) { | ||
75 | + BlockDriverState *bs = blk_bs(s->blk); | ||
76 | + int index; | ||
77 | + | ||
78 | + if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) { | ||
79 | + *status = VIRTIO_BLK_S_UNSUPP; | ||
80 | + return false; | ||
81 | + } | ||
82 | + | ||
83 | + if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS) | ||
84 | + || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) { | ||
85 | + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
86 | + return false; | ||
87 | + } | ||
88 | + | ||
89 | + if (append) { | ||
90 | + if (bs->bl.write_granularity) { | ||
91 | + if ((offset % bs->bl.write_granularity) != 0) { | ||
92 | + *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP; | ||
93 | + return false; | ||
94 | + } | ||
95 | + } | ||
96 | + | ||
97 | + index = offset / bs->bl.zone_size; | ||
98 | + if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) { | ||
99 | + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
100 | + return false; | ||
101 | + } | ||
102 | + | ||
103 | + if (len / 512 > bs->bl.max_append_sectors) { | ||
104 | + if (bs->bl.max_append_sectors == 0) { | ||
105 | + *status = VIRTIO_BLK_S_UNSUPP; | ||
106 | + } else { | ||
107 | + *status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
108 | + } | ||
109 | + return false; | ||
110 | + } | ||
111 | + } | ||
112 | + return true; | ||
113 | +} | ||
114 | + | ||
115 | +static void virtio_blk_zone_report_complete(void *opaque, int ret) | ||
116 | +{ | ||
117 | + ZoneCmdData *data = opaque; | ||
118 | + VirtIOBlockReq *req = data->req; | ||
119 | + VirtIOBlock *s = req->dev; | ||
120 | + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); | ||
121 | + struct iovec *in_iov = data->in_iov; | ||
122 | + unsigned in_num = data->in_num; | ||
123 | + int64_t zrp_size, n, j = 0; | ||
124 | + int64_t nz = data->zone_report_data.nr_zones; | ||
125 | + int8_t err_status = VIRTIO_BLK_S_OK; | ||
126 | + | ||
127 | + if (ret) { | ||
128 | + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
129 | + goto out; | ||
130 | + } | ||
131 | + | ||
132 | + struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) { | ||
133 | + .nr_zones = cpu_to_le64(nz), | ||
134 | + }; | ||
135 | + zrp_size = sizeof(struct virtio_blk_zone_report) | ||
136 | + + sizeof(struct virtio_blk_zone_descriptor) * nz; | ||
137 | + n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr)); | ||
138 | + if (n != sizeof(zrp_hdr)) { | ||
139 | + virtio_error(vdev, "Driver provided input buffer that is too small!"); | ||
140 | + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
141 | + goto out; | ||
142 | + } | ||
143 | + | ||
144 | + for (size_t i = sizeof(zrp_hdr); i < zrp_size; | ||
145 | + i += sizeof(struct virtio_blk_zone_descriptor), ++j) { | ||
146 | + struct virtio_blk_zone_descriptor desc = | ||
147 | + (struct virtio_blk_zone_descriptor) { | ||
148 | + .z_start = cpu_to_le64(data->zone_report_data.zones[j].start | ||
149 | + >> BDRV_SECTOR_BITS), | ||
150 | + .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap | ||
151 | + >> BDRV_SECTOR_BITS), | ||
152 | + .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp | ||
153 | + >> BDRV_SECTOR_BITS), | ||
154 | + }; | ||
155 | + | ||
156 | + switch (data->zone_report_data.zones[j].type) { | ||
157 | + case BLK_ZT_CONV: | ||
158 | + desc.z_type = VIRTIO_BLK_ZT_CONV; | ||
159 | + break; | ||
160 | + case BLK_ZT_SWR: | ||
161 | + desc.z_type = VIRTIO_BLK_ZT_SWR; | ||
162 | + break; | ||
163 | + case BLK_ZT_SWP: | ||
164 | + desc.z_type = VIRTIO_BLK_ZT_SWP; | ||
165 | + break; | ||
166 | + default: | ||
167 | + g_assert_not_reached(); | ||
168 | + } | ||
169 | + | ||
170 | + switch (data->zone_report_data.zones[j].state) { | ||
171 | + case BLK_ZS_RDONLY: | ||
172 | + desc.z_state = VIRTIO_BLK_ZS_RDONLY; | ||
173 | + break; | ||
174 | + case BLK_ZS_OFFLINE: | ||
175 | + desc.z_state = VIRTIO_BLK_ZS_OFFLINE; | ||
176 | + break; | ||
177 | + case BLK_ZS_EMPTY: | ||
178 | + desc.z_state = VIRTIO_BLK_ZS_EMPTY; | ||
179 | + break; | ||
180 | + case BLK_ZS_CLOSED: | ||
181 | + desc.z_state = VIRTIO_BLK_ZS_CLOSED; | ||
182 | + break; | ||
183 | + case BLK_ZS_FULL: | ||
184 | + desc.z_state = VIRTIO_BLK_ZS_FULL; | ||
185 | + break; | ||
186 | + case BLK_ZS_EOPEN: | ||
187 | + desc.z_state = VIRTIO_BLK_ZS_EOPEN; | ||
188 | + break; | ||
189 | + case BLK_ZS_IOPEN: | ||
190 | + desc.z_state = VIRTIO_BLK_ZS_IOPEN; | ||
191 | + break; | ||
192 | + case BLK_ZS_NOT_WP: | ||
193 | + desc.z_state = VIRTIO_BLK_ZS_NOT_WP; | ||
194 | + break; | ||
195 | + default: | ||
196 | + g_assert_not_reached(); | ||
197 | + } | ||
198 | + | ||
199 | + /* TODO: it takes O(n^2) time complexity. Optimizations required. */ | ||
200 | + n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc)); | ||
201 | + if (n != sizeof(desc)) { | ||
202 | + virtio_error(vdev, "Driver provided input buffer " | ||
203 | + "for descriptors that is too small!"); | ||
204 | + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
205 | + } | ||
206 | + } | ||
207 | + | ||
208 | +out: | ||
209 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
210 | + virtio_blk_req_complete(req, err_status); | ||
211 | + virtio_blk_free_request(req); | ||
212 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
213 | + g_free(data->zone_report_data.zones); | ||
214 | + g_free(data); | ||
215 | +} | ||
216 | + | ||
217 | +static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, | ||
218 | + struct iovec *in_iov, | ||
219 | + unsigned in_num) | ||
220 | +{ | ||
221 | + VirtIOBlock *s = req->dev; | ||
222 | + VirtIODevice *vdev = VIRTIO_DEVICE(s); | ||
223 | + unsigned int nr_zones; | ||
224 | + ZoneCmdData *data; | ||
225 | + int64_t zone_size, offset; | ||
226 | + uint8_t err_status; | ||
227 | + | ||
228 | + if (req->in_len < sizeof(struct virtio_blk_inhdr) + | ||
229 | + sizeof(struct virtio_blk_zone_report) + | ||
230 | + sizeof(struct virtio_blk_zone_descriptor)) { | ||
231 | + virtio_error(vdev, "in buffer too small for zone report"); | ||
232 | + return; | ||
233 | + } | ||
234 | + | ||
235 | + /* start byte offset of the zone report */ | ||
236 | + offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; | ||
237 | + if (!check_zoned_request(s, offset, 0, false, &err_status)) { | ||
238 | + goto out; | ||
239 | + } | ||
240 | + nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) - | ||
241 | + sizeof(struct virtio_blk_zone_report)) / | ||
242 | + sizeof(struct virtio_blk_zone_descriptor); | ||
243 | + | ||
244 | + zone_size = sizeof(BlockZoneDescriptor) * nr_zones; | ||
245 | + data = g_malloc(sizeof(ZoneCmdData)); | ||
246 | + data->req = req; | ||
247 | + data->in_iov = in_iov; | ||
248 | + data->in_num = in_num; | ||
249 | + data->zone_report_data.nr_zones = nr_zones; | ||
250 | + data->zone_report_data.zones = g_malloc(zone_size), | ||
251 | + | ||
252 | + blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones, | ||
253 | + data->zone_report_data.zones, | ||
254 | + virtio_blk_zone_report_complete, data); | ||
255 | + return; | ||
256 | +out: | ||
257 | + virtio_blk_req_complete(req, err_status); | ||
258 | + virtio_blk_free_request(req); | ||
259 | +} | ||
260 | + | ||
261 | +static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) | ||
262 | +{ | ||
263 | + VirtIOBlockReq *req = opaque; | ||
264 | + VirtIOBlock *s = req->dev; | ||
265 | + int8_t err_status = VIRTIO_BLK_S_OK; | ||
266 | + | ||
267 | + if (ret) { | ||
268 | + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
269 | + } | ||
270 | + | ||
271 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
272 | + virtio_blk_req_complete(req, err_status); | ||
273 | + virtio_blk_free_request(req); | ||
274 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
275 | +} | ||
276 | + | ||
277 | +static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) | ||
278 | +{ | ||
279 | + VirtIOBlock *s = req->dev; | ||
280 | + VirtIODevice *vdev = VIRTIO_DEVICE(s); | ||
281 | + BlockDriverState *bs = blk_bs(s->blk); | ||
282 | + int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; | ||
283 | + uint64_t len; | ||
284 | + uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS; | ||
285 | + uint8_t err_status = VIRTIO_BLK_S_OK; | ||
286 | + | ||
287 | + uint32_t type = virtio_ldl_p(vdev, &req->out.type); | ||
288 | + if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) { | ||
289 | + /* Entire drive capacity */ | ||
290 | + offset = 0; | ||
291 | + len = capacity; | ||
292 | + } else { | ||
293 | + if (bs->bl.zone_size > capacity - offset) { | ||
294 | + /* The zoned device allows the last smaller zone. */ | ||
295 | + len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1); | ||
296 | + } else { | ||
297 | + len = bs->bl.zone_size; | ||
298 | + } | ||
299 | + } | ||
300 | + | ||
301 | + if (!check_zoned_request(s, offset, len, false, &err_status)) { | ||
302 | + goto out; | ||
303 | + } | ||
304 | + | ||
305 | + blk_aio_zone_mgmt(s->blk, op, offset, len, | ||
306 | + virtio_blk_zone_mgmt_complete, req); | ||
307 | + | ||
308 | + return 0; | ||
309 | +out: | ||
310 | + virtio_blk_req_complete(req, err_status); | ||
311 | + virtio_blk_free_request(req); | ||
312 | + return err_status; | ||
313 | +} | ||
314 | + | ||
315 | +static void virtio_blk_zone_append_complete(void *opaque, int ret) | ||
316 | +{ | ||
317 | + ZoneCmdData *data = opaque; | ||
318 | + VirtIOBlockReq *req = data->req; | ||
319 | + VirtIOBlock *s = req->dev; | ||
320 | + VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); | ||
321 | + int64_t append_sector, n; | ||
322 | + uint8_t err_status = VIRTIO_BLK_S_OK; | ||
323 | + | ||
324 | + if (ret) { | ||
325 | + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
326 | + goto out; | ||
327 | + } | ||
328 | + | ||
329 | + virtio_stq_p(vdev, &append_sector, | ||
330 | + data->zone_append_data.offset >> BDRV_SECTOR_BITS); | ||
331 | + n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector, | ||
332 | + sizeof(append_sector)); | ||
333 | + if (n != sizeof(append_sector)) { | ||
334 | + virtio_error(vdev, "Driver provided input buffer less than size of " | ||
335 | + "append_sector"); | ||
336 | + err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
337 | + goto out; | ||
338 | + } | ||
339 | + | ||
340 | +out: | ||
341 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
342 | + virtio_blk_req_complete(req, err_status); | ||
343 | + virtio_blk_free_request(req); | ||
344 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
345 | + g_free(data); | ||
346 | +} | ||
347 | + | ||
348 | +static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, | ||
349 | + struct iovec *out_iov, | ||
350 | + struct iovec *in_iov, | ||
351 | + uint64_t out_num, | ||
352 | + unsigned in_num) { | ||
353 | + VirtIOBlock *s = req->dev; | ||
354 | + VirtIODevice *vdev = VIRTIO_DEVICE(s); | ||
355 | + uint8_t err_status = VIRTIO_BLK_S_OK; | ||
356 | + | ||
357 | + int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; | ||
358 | + int64_t len = iov_size(out_iov, out_num); | ||
359 | + | ||
360 | + if (!check_zoned_request(s, offset, len, true, &err_status)) { | ||
361 | + goto out; | ||
362 | + } | ||
363 | + | ||
364 | + ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData)); | ||
365 | + data->req = req; | ||
366 | + data->in_iov = in_iov; | ||
367 | + data->in_num = in_num; | ||
368 | + data->zone_append_data.offset = offset; | ||
369 | + qemu_iovec_init_external(&req->qiov, out_iov, out_num); | ||
370 | + blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0, | ||
371 | + virtio_blk_zone_append_complete, data); | ||
372 | + return 0; | ||
373 | + | ||
374 | +out: | ||
375 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
376 | + virtio_blk_req_complete(req, err_status); | ||
377 | + virtio_blk_free_request(req); | ||
378 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
379 | + return err_status; | ||
380 | +} | ||
381 | + | ||
382 | static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | ||
383 | { | ||
384 | uint32_t type; | ||
36 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | 385 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) |
37 | return 0; | 386 | case VIRTIO_BLK_T_FLUSH: |
387 | virtio_blk_handle_flush(req, mrb); | ||
388 | break; | ||
389 | + case VIRTIO_BLK_T_ZONE_REPORT: | ||
390 | + virtio_blk_handle_zone_report(req, in_iov, in_num); | ||
391 | + break; | ||
392 | + case VIRTIO_BLK_T_ZONE_OPEN: | ||
393 | + virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN); | ||
394 | + break; | ||
395 | + case VIRTIO_BLK_T_ZONE_CLOSE: | ||
396 | + virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE); | ||
397 | + break; | ||
398 | + case VIRTIO_BLK_T_ZONE_FINISH: | ||
399 | + virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH); | ||
400 | + break; | ||
401 | + case VIRTIO_BLK_T_ZONE_RESET: | ||
402 | + virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); | ||
403 | + break; | ||
404 | + case VIRTIO_BLK_T_ZONE_RESET_ALL: | ||
405 | + virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET); | ||
406 | + break; | ||
407 | case VIRTIO_BLK_T_SCSI_CMD: | ||
408 | virtio_blk_handle_scsi(req); | ||
409 | break; | ||
410 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) | ||
411 | virtio_blk_free_request(req); | ||
412 | break; | ||
413 | } | ||
414 | + case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT: | ||
415 | + /* | ||
416 | + * Passing out_iov/out_num and in_iov/in_num is not safe | ||
417 | + * to access req->elem.out_sg directly because it may be | ||
418 | + * modified by virtio_blk_handle_request(). | ||
419 | + */ | ||
420 | + virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num); | ||
421 | + break; | ||
422 | /* | ||
423 | * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with | ||
424 | * VIRTIO_BLK_T_OUT flag set. We masked this flag in the switch statement, | ||
425 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) | ||
426 | { | ||
427 | VirtIOBlock *s = VIRTIO_BLK(vdev); | ||
428 | BlockConf *conf = &s->conf.conf; | ||
429 | + BlockDriverState *bs = blk_bs(s->blk); | ||
430 | struct virtio_blk_config blkcfg; | ||
431 | uint64_t capacity; | ||
432 | int64_t length; | ||
433 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) | ||
434 | blkcfg.write_zeroes_may_unmap = 1; | ||
435 | virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1); | ||
436 | } | ||
437 | + if (bs->bl.zoned != BLK_Z_NONE) { | ||
438 | + switch (bs->bl.zoned) { | ||
439 | + case BLK_Z_HM: | ||
440 | + blkcfg.zoned.model = VIRTIO_BLK_Z_HM; | ||
441 | + break; | ||
442 | + case BLK_Z_HA: | ||
443 | + blkcfg.zoned.model = VIRTIO_BLK_Z_HA; | ||
444 | + break; | ||
445 | + default: | ||
446 | + g_assert_not_reached(); | ||
447 | + } | ||
448 | + | ||
449 | + virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors, | ||
450 | + bs->bl.zone_size / 512); | ||
451 | + virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones, | ||
452 | + bs->bl.max_active_zones); | ||
453 | + virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones, | ||
454 | + bs->bl.max_open_zones); | ||
455 | + virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size); | ||
456 | + virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors, | ||
457 | + bs->bl.max_append_sectors); | ||
458 | + } else { | ||
459 | + blkcfg.zoned.model = VIRTIO_BLK_Z_NONE; | ||
460 | + } | ||
461 | memcpy(config, &blkcfg, s->config_size); | ||
38 | } | 462 | } |
39 | 463 | ||
40 | -bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | 464 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) |
41 | +void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | 465 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); |
42 | { | 466 | VirtIOBlock *s = VIRTIO_BLK(dev); |
43 | VirtIOBlockReq *req; | 467 | VirtIOBlkConf *conf = &s->conf; |
44 | MultiReqBuffer mrb = {}; | 468 | + BlockDriverState *bs = blk_bs(conf->conf.blk); |
45 | bool suppress_notifications = virtio_queue_get_notification(vq); | 469 | Error *err = NULL; |
46 | - bool progress = false; | 470 | unsigned i; |
47 | 471 | ||
48 | aio_context_acquire(blk_get_aio_context(s->blk)); | 472 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) |
49 | blk_io_plug(s->blk); | 473 | return; |
50 | @@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | ||
51 | } | ||
52 | |||
53 | while ((req = virtio_blk_get_request(s, vq))) { | ||
54 | - progress = true; | ||
55 | if (virtio_blk_handle_request(req, &mrb)) { | ||
56 | virtqueue_detach_element(req->vq, &req->elem, 0); | ||
57 | virtio_blk_free_request(req); | ||
58 | @@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | ||
59 | |||
60 | blk_io_unplug(s->blk); | ||
61 | aio_context_release(blk_get_aio_context(s->blk)); | ||
62 | - return progress; | ||
63 | -} | ||
64 | - | ||
65 | -static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq) | ||
66 | -{ | ||
67 | - virtio_blk_handle_vq(s, vq); | ||
68 | } | ||
69 | |||
70 | static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) | ||
71 | { | ||
72 | VirtIOBlock *s = (VirtIOBlock *)vdev; | ||
73 | |||
74 | - if (s->dataplane) { | ||
75 | + if (s->dataplane && !s->dataplane_started) { | ||
76 | /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start | ||
77 | * dataplane here instead of waiting for .set_status(). | ||
78 | */ | ||
79 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) | ||
80 | return; | ||
81 | } | ||
82 | } | 474 | } |
83 | - virtio_blk_handle_output_do(s, vq); | 475 | |
84 | + virtio_blk_handle_vq(s, vq); | 476 | + if (bs->bl.zoned != BLK_Z_NONE) { |
85 | } | 477 | + virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED); |
86 | 478 | + if (bs->bl.zoned == BLK_Z_HM) { | |
87 | void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh) | 479 | + virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD); |
480 | + } | ||
481 | + } | ||
482 | + | ||
483 | if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) && | ||
484 | (!conf->max_discard_sectors || | ||
485 | conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) { | ||
486 | diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c | ||
487 | index XXXXXXX..XXXXXXX 100644 | ||
488 | --- a/hw/virtio/virtio-qmp.c | ||
489 | +++ b/hw/virtio/virtio-qmp.c | ||
490 | @@ -XXX,XX +XXX,XX @@ static const qmp_virtio_feature_map_t virtio_blk_feature_map[] = { | ||
491 | "VIRTIO_BLK_F_DISCARD: Discard command supported"), | ||
492 | FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \ | ||
493 | "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"), | ||
494 | + FEATURE_ENTRY(VIRTIO_BLK_F_ZONED, \ | ||
495 | + "VIRTIO_BLK_F_ZONED: Zoned block devices"), | ||
496 | #ifndef VIRTIO_BLK_NO_LEGACY | ||
497 | FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \ | ||
498 | "VIRTIO_BLK_F_BARRIER: Request barriers supported"), | ||
88 | -- | 499 | -- |
89 | 2.34.1 | 500 | 2.40.0 |
90 | |||
91 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Taking account of the new zone append write operation for zoned devices, | ||
4 | BLOCK_ACCT_ZONE_APPEND enum is introduced as other I/O request type (read, | ||
5 | write, flush). | ||
6 | |||
7 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Message-Id: <20230407082528.18841-4-faithilikerun@gmail.com> | ||
10 | --- | ||
11 | qapi/block-core.json | 68 ++++++++++++++++++++++++++++++++------ | ||
12 | qapi/block.json | 4 +++ | ||
13 | include/block/accounting.h | 1 + | ||
14 | block/qapi-sysemu.c | 11 ++++++ | ||
15 | block/qapi.c | 18 ++++++++++ | ||
16 | hw/block/virtio-blk.c | 4 +++ | ||
17 | 6 files changed, 95 insertions(+), 11 deletions(-) | ||
18 | |||
19 | diff --git a/qapi/block-core.json b/qapi/block-core.json | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/qapi/block-core.json | ||
22 | +++ b/qapi/block-core.json | ||
23 | @@ -XXX,XX +XXX,XX @@ | ||
24 | # @min_wr_latency_ns: Minimum latency of write operations in the | ||
25 | # defined interval, in nanoseconds. | ||
26 | # | ||
27 | +# @min_zone_append_latency_ns: Minimum latency of zone append operations | ||
28 | +# in the defined interval, in nanoseconds | ||
29 | +# (since 8.1) | ||
30 | +# | ||
31 | # @min_flush_latency_ns: Minimum latency of flush operations in the | ||
32 | # defined interval, in nanoseconds. | ||
33 | # | ||
34 | @@ -XXX,XX +XXX,XX @@ | ||
35 | # @max_wr_latency_ns: Maximum latency of write operations in the | ||
36 | # defined interval, in nanoseconds. | ||
37 | # | ||
38 | +# @max_zone_append_latency_ns: Maximum latency of zone append operations | ||
39 | +# in the defined interval, in nanoseconds | ||
40 | +# (since 8.1) | ||
41 | +# | ||
42 | # @max_flush_latency_ns: Maximum latency of flush operations in the | ||
43 | # defined interval, in nanoseconds. | ||
44 | # | ||
45 | @@ -XXX,XX +XXX,XX @@ | ||
46 | # @avg_wr_latency_ns: Average latency of write operations in the | ||
47 | # defined interval, in nanoseconds. | ||
48 | # | ||
49 | +# @avg_zone_append_latency_ns: Average latency of zone append operations | ||
50 | +# in the defined interval, in nanoseconds | ||
51 | +# (since 8.1) | ||
52 | +# | ||
53 | # @avg_flush_latency_ns: Average latency of flush operations in the | ||
54 | # defined interval, in nanoseconds. | ||
55 | # | ||
56 | @@ -XXX,XX +XXX,XX @@ | ||
57 | # @avg_wr_queue_depth: Average number of pending write operations | ||
58 | # in the defined interval. | ||
59 | # | ||
60 | +# @avg_zone_append_queue_depth: Average number of pending zone append | ||
61 | +# operations in the defined interval | ||
62 | +# (since 8.1). | ||
63 | +# | ||
64 | # Since: 2.5 | ||
65 | ## | ||
66 | { 'struct': 'BlockDeviceTimedStats', | ||
67 | 'data': { 'interval_length': 'int', 'min_rd_latency_ns': 'int', | ||
68 | 'max_rd_latency_ns': 'int', 'avg_rd_latency_ns': 'int', | ||
69 | 'min_wr_latency_ns': 'int', 'max_wr_latency_ns': 'int', | ||
70 | - 'avg_wr_latency_ns': 'int', 'min_flush_latency_ns': 'int', | ||
71 | - 'max_flush_latency_ns': 'int', 'avg_flush_latency_ns': 'int', | ||
72 | - 'avg_rd_queue_depth': 'number', 'avg_wr_queue_depth': 'number' } } | ||
73 | + 'avg_wr_latency_ns': 'int', 'min_zone_append_latency_ns': 'int', | ||
74 | + 'max_zone_append_latency_ns': 'int', | ||
75 | + 'avg_zone_append_latency_ns': 'int', | ||
76 | + 'min_flush_latency_ns': 'int', 'max_flush_latency_ns': 'int', | ||
77 | + 'avg_flush_latency_ns': 'int', 'avg_rd_queue_depth': 'number', | ||
78 | + 'avg_wr_queue_depth': 'number', | ||
79 | + 'avg_zone_append_queue_depth': 'number' } } | ||
80 | |||
81 | ## | ||
82 | # @BlockDeviceStats: | ||
83 | @@ -XXX,XX +XXX,XX @@ | ||
84 | # | ||
85 | # @wr_bytes: The number of bytes written by the device. | ||
86 | # | ||
87 | +# @zone_append_bytes: The number of bytes appended by the zoned devices | ||
88 | +# (since 8.1) | ||
89 | +# | ||
90 | # @unmap_bytes: The number of bytes unmapped by the device (Since 4.2) | ||
91 | # | ||
92 | # @rd_operations: The number of read operations performed by the device. | ||
93 | # | ||
94 | # @wr_operations: The number of write operations performed by the device. | ||
95 | # | ||
96 | +# @zone_append_operations: The number of zone append operations performed | ||
97 | +# by the zoned devices (since 8.1) | ||
98 | +# | ||
99 | # @flush_operations: The number of cache flush operations performed by the | ||
100 | # device (since 0.15) | ||
101 | # | ||
102 | @@ -XXX,XX +XXX,XX @@ | ||
103 | # | ||
104 | # @wr_total_time_ns: Total time spent on writes in nanoseconds (since 0.15). | ||
105 | # | ||
106 | +# @zone_append_total_time_ns: Total time spent on zone append writes | ||
107 | +# in nanoseconds (since 8.1) | ||
108 | +# | ||
109 | # @flush_total_time_ns: Total time spent on cache flushes in nanoseconds | ||
110 | # (since 0.15). | ||
111 | # | ||
112 | @@ -XXX,XX +XXX,XX @@ | ||
113 | # @wr_merged: Number of write requests that have been merged into another | ||
114 | # request (Since 2.3). | ||
115 | # | ||
116 | +# @zone_append_merged: Number of zone append requests that have been merged | ||
117 | +# into another request (since 8.1) | ||
118 | +# | ||
119 | # @unmap_merged: Number of unmap requests that have been merged into another | ||
120 | # request (Since 4.2) | ||
121 | # | ||
122 | @@ -XXX,XX +XXX,XX @@ | ||
123 | # @failed_wr_operations: The number of failed write operations | ||
124 | # performed by the device (Since 2.5) | ||
125 | # | ||
126 | +# @failed_zone_append_operations: The number of failed zone append write | ||
127 | +# operations performed by the zoned devices | ||
128 | +# (since 8.1) | ||
129 | +# | ||
130 | # @failed_flush_operations: The number of failed flush operations | ||
131 | # performed by the device (Since 2.5) | ||
132 | # | ||
133 | @@ -XXX,XX +XXX,XX @@ | ||
134 | # @invalid_wr_operations: The number of invalid write operations | ||
135 | # performed by the device (Since 2.5) | ||
136 | # | ||
137 | +# @invalid_zone_append_operations: The number of invalid zone append operations | ||
138 | +# performed by the zoned device (since 8.1) | ||
139 | +# | ||
140 | # @invalid_flush_operations: The number of invalid flush operations | ||
141 | # performed by the device (Since 2.5) | ||
142 | # | ||
143 | @@ -XXX,XX +XXX,XX @@ | ||
144 | # | ||
145 | # @wr_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) | ||
146 | # | ||
147 | +# @zone_append_latency_histogram: @BlockLatencyHistogramInfo. (since 8.1) | ||
148 | +# | ||
149 | # @flush_latency_histogram: @BlockLatencyHistogramInfo. (Since 4.0) | ||
150 | # | ||
151 | # Since: 0.14 | ||
152 | ## | ||
153 | { 'struct': 'BlockDeviceStats', | ||
154 | - 'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'unmap_bytes' : 'int', | ||
155 | - 'rd_operations': 'int', 'wr_operations': 'int', | ||
156 | + 'data': {'rd_bytes': 'int', 'wr_bytes': 'int', 'zone_append_bytes': 'int', | ||
157 | + 'unmap_bytes' : 'int', 'rd_operations': 'int', | ||
158 | + 'wr_operations': 'int', 'zone_append_operations': 'int', | ||
159 | 'flush_operations': 'int', 'unmap_operations': 'int', | ||
160 | 'rd_total_time_ns': 'int', 'wr_total_time_ns': 'int', | ||
161 | - 'flush_total_time_ns': 'int', 'unmap_total_time_ns': 'int', | ||
162 | - 'wr_highest_offset': 'int', | ||
163 | - 'rd_merged': 'int', 'wr_merged': 'int', 'unmap_merged': 'int', | ||
164 | - '*idle_time_ns': 'int', | ||
165 | + 'zone_append_total_time_ns': 'int', 'flush_total_time_ns': 'int', | ||
166 | + 'unmap_total_time_ns': 'int', 'wr_highest_offset': 'int', | ||
167 | + 'rd_merged': 'int', 'wr_merged': 'int', 'zone_append_merged': 'int', | ||
168 | + 'unmap_merged': 'int', '*idle_time_ns': 'int', | ||
169 | 'failed_rd_operations': 'int', 'failed_wr_operations': 'int', | ||
170 | - 'failed_flush_operations': 'int', 'failed_unmap_operations': 'int', | ||
171 | - 'invalid_rd_operations': 'int', 'invalid_wr_operations': 'int', | ||
172 | + 'failed_zone_append_operations': 'int', | ||
173 | + 'failed_flush_operations': 'int', | ||
174 | + 'failed_unmap_operations': 'int', 'invalid_rd_operations': 'int', | ||
175 | + 'invalid_wr_operations': 'int', | ||
176 | + 'invalid_zone_append_operations': 'int', | ||
177 | 'invalid_flush_operations': 'int', 'invalid_unmap_operations': 'int', | ||
178 | 'account_invalid': 'bool', 'account_failed': 'bool', | ||
179 | 'timed_stats': ['BlockDeviceTimedStats'], | ||
180 | '*rd_latency_histogram': 'BlockLatencyHistogramInfo', | ||
181 | '*wr_latency_histogram': 'BlockLatencyHistogramInfo', | ||
182 | + '*zone_append_latency_histogram': 'BlockLatencyHistogramInfo', | ||
183 | '*flush_latency_histogram': 'BlockLatencyHistogramInfo' } } | ||
184 | |||
185 | ## | ||
186 | diff --git a/qapi/block.json b/qapi/block.json | ||
187 | index XXXXXXX..XXXXXXX 100644 | ||
188 | --- a/qapi/block.json | ||
189 | +++ b/qapi/block.json | ||
190 | @@ -XXX,XX +XXX,XX @@ | ||
191 | # @boundaries-write: list of interval boundary values for write latency | ||
192 | # histogram. | ||
193 | # | ||
194 | +# @boundaries-zap: list of interval boundary values for zone append write | ||
195 | +# latency histogram. | ||
196 | +# | ||
197 | # @boundaries-flush: list of interval boundary values for flush latency | ||
198 | # histogram. | ||
199 | # | ||
200 | @@ -XXX,XX +XXX,XX @@ | ||
201 | '*boundaries': ['uint64'], | ||
202 | '*boundaries-read': ['uint64'], | ||
203 | '*boundaries-write': ['uint64'], | ||
204 | + '*boundaries-zap': ['uint64'], | ||
205 | '*boundaries-flush': ['uint64'] }, | ||
206 | 'allow-preconfig': true } | ||
207 | diff --git a/include/block/accounting.h b/include/block/accounting.h | ||
208 | index XXXXXXX..XXXXXXX 100644 | ||
209 | --- a/include/block/accounting.h | ||
210 | +++ b/include/block/accounting.h | ||
211 | @@ -XXX,XX +XXX,XX @@ enum BlockAcctType { | ||
212 | BLOCK_ACCT_READ, | ||
213 | BLOCK_ACCT_WRITE, | ||
214 | BLOCK_ACCT_FLUSH, | ||
215 | + BLOCK_ACCT_ZONE_APPEND, | ||
216 | BLOCK_ACCT_UNMAP, | ||
217 | BLOCK_MAX_IOTYPE, | ||
218 | }; | ||
219 | diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c | ||
220 | index XXXXXXX..XXXXXXX 100644 | ||
221 | --- a/block/qapi-sysemu.c | ||
222 | +++ b/block/qapi-sysemu.c | ||
223 | @@ -XXX,XX +XXX,XX @@ void qmp_block_latency_histogram_set( | ||
224 | bool has_boundaries, uint64List *boundaries, | ||
225 | bool has_boundaries_read, uint64List *boundaries_read, | ||
226 | bool has_boundaries_write, uint64List *boundaries_write, | ||
227 | + bool has_boundaries_append, uint64List *boundaries_append, | ||
228 | bool has_boundaries_flush, uint64List *boundaries_flush, | ||
229 | Error **errp) | ||
230 | { | ||
231 | @@ -XXX,XX +XXX,XX @@ void qmp_block_latency_histogram_set( | ||
232 | } | ||
233 | } | ||
234 | |||
235 | + if (has_boundaries || has_boundaries_append) { | ||
236 | + ret = block_latency_histogram_set( | ||
237 | + stats, BLOCK_ACCT_ZONE_APPEND, | ||
238 | + has_boundaries_append ? boundaries_append : boundaries); | ||
239 | + if (ret) { | ||
240 | + error_setg(errp, "Device '%s' set append write boundaries fail", id); | ||
241 | + return; | ||
242 | + } | ||
243 | + } | ||
244 | + | ||
245 | if (has_boundaries || has_boundaries_flush) { | ||
246 | ret = block_latency_histogram_set( | ||
247 | stats, BLOCK_ACCT_FLUSH, | ||
248 | diff --git a/block/qapi.c b/block/qapi.c | ||
249 | index XXXXXXX..XXXXXXX 100644 | ||
250 | --- a/block/qapi.c | ||
251 | +++ b/block/qapi.c | ||
252 | @@ -XXX,XX +XXX,XX @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) | ||
253 | |||
254 | ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ]; | ||
255 | ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE]; | ||
256 | + ds->zone_append_bytes = stats->nr_bytes[BLOCK_ACCT_ZONE_APPEND]; | ||
257 | ds->unmap_bytes = stats->nr_bytes[BLOCK_ACCT_UNMAP]; | ||
258 | ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ]; | ||
259 | ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE]; | ||
260 | + ds->zone_append_operations = stats->nr_ops[BLOCK_ACCT_ZONE_APPEND]; | ||
261 | ds->unmap_operations = stats->nr_ops[BLOCK_ACCT_UNMAP]; | ||
262 | |||
263 | ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ]; | ||
264 | ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE]; | ||
265 | + ds->failed_zone_append_operations = | ||
266 | + stats->failed_ops[BLOCK_ACCT_ZONE_APPEND]; | ||
267 | ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH]; | ||
268 | ds->failed_unmap_operations = stats->failed_ops[BLOCK_ACCT_UNMAP]; | ||
269 | |||
270 | ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ]; | ||
271 | ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE]; | ||
272 | + ds->invalid_zone_append_operations = | ||
273 | + stats->invalid_ops[BLOCK_ACCT_ZONE_APPEND]; | ||
274 | ds->invalid_flush_operations = | ||
275 | stats->invalid_ops[BLOCK_ACCT_FLUSH]; | ||
276 | ds->invalid_unmap_operations = stats->invalid_ops[BLOCK_ACCT_UNMAP]; | ||
277 | |||
278 | ds->rd_merged = stats->merged[BLOCK_ACCT_READ]; | ||
279 | ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE]; | ||
280 | + ds->zone_append_merged = stats->merged[BLOCK_ACCT_ZONE_APPEND]; | ||
281 | ds->unmap_merged = stats->merged[BLOCK_ACCT_UNMAP]; | ||
282 | ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH]; | ||
283 | ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE]; | ||
284 | + ds->zone_append_total_time_ns = | ||
285 | + stats->total_time_ns[BLOCK_ACCT_ZONE_APPEND]; | ||
286 | ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ]; | ||
287 | ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH]; | ||
288 | ds->unmap_total_time_ns = stats->total_time_ns[BLOCK_ACCT_UNMAP]; | ||
289 | @@ -XXX,XX +XXX,XX @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) | ||
290 | |||
291 | TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ]; | ||
292 | TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE]; | ||
293 | + TimedAverage *zap = &ts->latency[BLOCK_ACCT_ZONE_APPEND]; | ||
294 | TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH]; | ||
295 | |||
296 | dev_stats->interval_length = ts->interval_length; | ||
297 | @@ -XXX,XX +XXX,XX @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) | ||
298 | dev_stats->max_wr_latency_ns = timed_average_max(wr); | ||
299 | dev_stats->avg_wr_latency_ns = timed_average_avg(wr); | ||
300 | |||
301 | + dev_stats->min_zone_append_latency_ns = timed_average_min(zap); | ||
302 | + dev_stats->max_zone_append_latency_ns = timed_average_max(zap); | ||
303 | + dev_stats->avg_zone_append_latency_ns = timed_average_avg(zap); | ||
304 | + | ||
305 | dev_stats->min_flush_latency_ns = timed_average_min(fl); | ||
306 | dev_stats->max_flush_latency_ns = timed_average_max(fl); | ||
307 | dev_stats->avg_flush_latency_ns = timed_average_avg(fl); | ||
308 | @@ -XXX,XX +XXX,XX @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) | ||
309 | block_acct_queue_depth(ts, BLOCK_ACCT_READ); | ||
310 | dev_stats->avg_wr_queue_depth = | ||
311 | block_acct_queue_depth(ts, BLOCK_ACCT_WRITE); | ||
312 | + dev_stats->avg_zone_append_queue_depth = | ||
313 | + block_acct_queue_depth(ts, BLOCK_ACCT_ZONE_APPEND); | ||
314 | |||
315 | QAPI_LIST_PREPEND(ds->timed_stats, dev_stats); | ||
316 | } | ||
317 | @@ -XXX,XX +XXX,XX @@ static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk) | ||
318 | = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_READ]); | ||
319 | ds->wr_latency_histogram | ||
320 | = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_WRITE]); | ||
321 | + ds->zone_append_latency_histogram | ||
322 | + = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_ZONE_APPEND]); | ||
323 | ds->flush_latency_histogram | ||
324 | = bdrv_latency_histogram_stats(&hgram[BLOCK_ACCT_FLUSH]); | ||
325 | } | ||
326 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | ||
327 | index XXXXXXX..XXXXXXX 100644 | ||
328 | --- a/hw/block/virtio-blk.c | ||
329 | +++ b/hw/block/virtio-blk.c | ||
330 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, | ||
331 | data->in_num = in_num; | ||
332 | data->zone_append_data.offset = offset; | ||
333 | qemu_iovec_init_external(&req->qiov, out_iov, out_num); | ||
334 | + | ||
335 | + block_acct_start(blk_get_stats(s->blk), &req->acct, len, | ||
336 | + BLOCK_ACCT_ZONE_APPEND); | ||
337 | + | ||
338 | blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0, | ||
339 | virtio_blk_zone_append_complete, data); | ||
340 | return 0; | ||
341 | -- | ||
342 | 2.40.0 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
4 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
5 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
6 | Message-Id: <20230407082528.18841-5-faithilikerun@gmail.com> | ||
7 | --- | ||
8 | hw/block/virtio-blk.c | 12 ++++++++++++ | ||
9 | hw/block/trace-events | 7 +++++++ | ||
10 | 2 files changed, 19 insertions(+) | ||
11 | |||
12 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | ||
13 | index XXXXXXX..XXXXXXX 100644 | ||
14 | --- a/hw/block/virtio-blk.c | ||
15 | +++ b/hw/block/virtio-blk.c | ||
16 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) | ||
17 | int64_t nz = data->zone_report_data.nr_zones; | ||
18 | int8_t err_status = VIRTIO_BLK_S_OK; | ||
19 | |||
20 | + trace_virtio_blk_zone_report_complete(vdev, req, nz, ret); | ||
21 | if (ret) { | ||
22 | err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
23 | goto out; | ||
24 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_handle_zone_report(VirtIOBlockReq *req, | ||
25 | nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) - | ||
26 | sizeof(struct virtio_blk_zone_report)) / | ||
27 | sizeof(struct virtio_blk_zone_descriptor); | ||
28 | + trace_virtio_blk_handle_zone_report(vdev, req, | ||
29 | + offset >> BDRV_SECTOR_BITS, nr_zones); | ||
30 | |||
31 | zone_size = sizeof(BlockZoneDescriptor) * nr_zones; | ||
32 | data = g_malloc(sizeof(ZoneCmdData)); | ||
33 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) | ||
34 | { | ||
35 | VirtIOBlockReq *req = opaque; | ||
36 | VirtIOBlock *s = req->dev; | ||
37 | + VirtIODevice *vdev = VIRTIO_DEVICE(s); | ||
38 | int8_t err_status = VIRTIO_BLK_S_OK; | ||
39 | + trace_virtio_blk_zone_mgmt_complete(vdev, req,ret); | ||
40 | |||
41 | if (ret) { | ||
42 | err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
43 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) | ||
44 | /* Entire drive capacity */ | ||
45 | offset = 0; | ||
46 | len = capacity; | ||
47 | + trace_virtio_blk_handle_zone_reset_all(vdev, req, 0, | ||
48 | + bs->total_sectors); | ||
49 | } else { | ||
50 | if (bs->bl.zone_size > capacity - offset) { | ||
51 | /* The zoned device allows the last smaller zone. */ | ||
52 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) | ||
53 | } else { | ||
54 | len = bs->bl.zone_size; | ||
55 | } | ||
56 | + trace_virtio_blk_handle_zone_mgmt(vdev, req, op, | ||
57 | + offset >> BDRV_SECTOR_BITS, | ||
58 | + len >> BDRV_SECTOR_BITS); | ||
59 | } | ||
60 | |||
61 | if (!check_zoned_request(s, offset, len, false, &err_status)) { | ||
62 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) | ||
63 | err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; | ||
64 | goto out; | ||
65 | } | ||
66 | + trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret); | ||
67 | |||
68 | out: | ||
69 | aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
70 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, | ||
71 | int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS; | ||
72 | int64_t len = iov_size(out_iov, out_num); | ||
73 | |||
74 | + trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS); | ||
75 | if (!check_zoned_request(s, offset, len, true, &err_status)) { | ||
76 | goto out; | ||
77 | } | ||
78 | diff --git a/hw/block/trace-events b/hw/block/trace-events | ||
79 | index XXXXXXX..XXXXXXX 100644 | ||
80 | --- a/hw/block/trace-events | ||
81 | +++ b/hw/block/trace-events | ||
82 | @@ -XXX,XX +XXX,XX @@ pflash_write_unknown(const char *name, uint8_t cmd) "%s: unknown command 0x%02x" | ||
83 | # virtio-blk.c | ||
84 | virtio_blk_req_complete(void *vdev, void *req, int status) "vdev %p req %p status %d" | ||
85 | virtio_blk_rw_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" | ||
86 | +virtio_blk_zone_report_complete(void *vdev, void *req, unsigned int nr_zones, int ret) "vdev %p req %p nr_zones %u ret %d" | ||
87 | +virtio_blk_zone_mgmt_complete(void *vdev, void *req, int ret) "vdev %p req %p ret %d" | ||
88 | +virtio_blk_zone_append_complete(void *vdev, void *req, int64_t sector, int ret) "vdev %p req %p, append sector 0x%" PRIx64 " ret %d" | ||
89 | virtio_blk_handle_write(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" | ||
90 | virtio_blk_handle_read(void *vdev, void *req, uint64_t sector, size_t nsectors) "vdev %p req %p sector %"PRIu64" nsectors %zu" | ||
91 | virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "vdev %p mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d" | ||
92 | +virtio_blk_handle_zone_report(void *vdev, void *req, int64_t sector, unsigned int nr_zones) "vdev %p req %p sector 0x%" PRIx64 " nr_zones %u" | ||
93 | +virtio_blk_handle_zone_mgmt(void *vdev, void *req, uint8_t op, int64_t sector, int64_t len) "vdev %p req %p op 0x%x sector 0x%" PRIx64 " len 0x%" PRIx64 "" | ||
94 | +virtio_blk_handle_zone_reset_all(void *vdev, void *req, int64_t sector, int64_t len) "vdev %p req %p sector 0x%" PRIx64 " cap 0x%" PRIx64 "" | ||
95 | +virtio_blk_handle_zone_append(void *vdev, void *req, int64_t sector) "vdev %p req %p, append sector 0x%" PRIx64 "" | ||
96 | |||
97 | # hd-geometry.c | ||
98 | hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d" | ||
99 | -- | ||
100 | 2.40.0 | diff view generated by jsdifflib |
New patch | |||
---|---|---|---|
1 | From: Sam Li <faithilikerun@gmail.com> | ||
1 | 2 | ||
3 | Add the documentation about the example of using virtio-blk driver | ||
4 | to pass the zoned block devices through to the guest. | ||
5 | |||
6 | Signed-off-by: Sam Li <faithilikerun@gmail.com> | ||
7 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | [Fix rST syntax | ||
9 | --Stefan] | ||
10 | Message-Id: <20230407082528.18841-6-faithilikerun@gmail.com> | ||
11 | --- | ||
12 | docs/devel/zoned-storage.rst | 19 +++++++++++++++++++ | ||
13 | 1 file changed, 19 insertions(+) | ||
14 | |||
15 | diff --git a/docs/devel/zoned-storage.rst b/docs/devel/zoned-storage.rst | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/docs/devel/zoned-storage.rst | ||
18 | +++ b/docs/devel/zoned-storage.rst | ||
19 | @@ -XXX,XX +XXX,XX @@ APIs for zoned storage emulation or testing. | ||
20 | For example, to test zone_report on a null_blk device using qemu-io is:: | ||
21 | |||
22 | $ path/to/qemu-io --image-opts -n driver=host_device,filename=/dev/nullb0 -c "zrp offset nr_zones" | ||
23 | + | ||
24 | +To expose the host's zoned block device through virtio-blk, the command line | ||
25 | +can be (includes the -device parameter):: | ||
26 | + | ||
27 | + -blockdev node-name=drive0,driver=host_device,filename=/dev/nullb0,cache.direct=on \ | ||
28 | + -device virtio-blk-pci,drive=drive0 | ||
29 | + | ||
30 | +Or only use the -drive parameter:: | ||
31 | + | ||
32 | + -driver driver=host_device,file=/dev/nullb0,if=virtio,cache.direct=on | ||
33 | + | ||
34 | +Additionally, QEMU has several ways of supporting zoned storage, including: | ||
35 | +(1) Using virtio-scsi: --device scsi-block allows for the passing through of | ||
36 | +SCSI ZBC devices, enabling the attachment of ZBC or ZAC HDDs to QEMU. | ||
37 | +(2) PCI device pass-through: While NVMe ZNS emulation is available for testing | ||
38 | +purposes, it cannot yet pass through a zoned device from the host. To pass on | ||
39 | +the NVMe ZNS device to the guest, use VFIO PCI pass the entire NVMe PCI adapter | ||
40 | +through to the guest. Likewise, an HDD HBA can be passed on to QEMU all HDDs | ||
41 | +attached to the HBA. | ||
42 | -- | ||
43 | 2.40.0 | diff view generated by jsdifflib |