The following changes since commit 56f9e46b841c7be478ca038d8d4085d776ab4b0d:

  Merge remote-tracking branch 'remotes/armbru/tags/pull-qapi-2017-02-20' into staging (2017-02-20 17:42:47 +0000)

are available in the git repository at:

  git://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to a7b91d35bab97a2d3e779d0c64c9b837b52a6cf7:

  coroutine-lock: make CoRwlock thread-safe and fair (2017-02-21 11:39:40 +0000)

----------------------------------------------------------------
Pull request

v2:
 * Rebased to resolve scsi conflicts

----------------------------------------------------------------

Paolo Bonzini (24):
  block: move AioContext, QEMUTimer, main-loop to libqemuutil
  aio: introduce aio_co_schedule and aio_co_wake
  block-backend: allow blk_prw from coroutine context
  test-thread-pool: use generic AioContext infrastructure
  io: add methods to set I/O handlers on AioContext
  io: make qio_channel_yield aware of AioContexts
  nbd: convert to use qio_channel_yield
  coroutine-lock: reschedule coroutine on the AioContext it was running on
  blkdebug: reschedule coroutine on the AioContext it is running on
  qed: introduce qed_aio_start_io and qed_aio_next_io_cb
  aio: push aio_context_acquire/release down to dispatching
  block: explicitly acquire aiocontext in timers that need it
  block: explicitly acquire aiocontext in callbacks that need it
  block: explicitly acquire aiocontext in bottom halves that need it
  block: explicitly acquire aiocontext in aio callbacks that need it
  aio-posix: partially inline aio_dispatch into aio_poll
  async: remove unnecessary inc/dec pairs
  block: document fields protected by AioContext lock
  coroutine-lock: make CoMutex thread-safe
  coroutine-lock: add limited spinning to CoMutex
  test-aio-multithread: add performance comparison with thread-based mutexes
  coroutine-lock: place CoMutex before CoQueue in header
  coroutine-lock: add mutex argument to CoQueue APIs
  coroutine-lock: make CoRwlock thread-safe and fair

 Makefile.objs | 4 -
 stubs/Makefile.objs | 1 +
 tests/Makefile.include | 19 +-
 util/Makefile.objs | 6 +-
 block/nbd-client.h | 2 +-
 block/qed.h | 3 +
 include/block/aio.h | 38 ++-
 include/block/block_int.h | 64 +++--
 include/io/channel.h | 72 +++++-
 include/qemu/coroutine.h | 84 ++++---
 include/qemu/coroutine_int.h | 11 +-
 include/sysemu/block-backend.h | 14 +-
 tests/iothread.h | 25 ++
 block/backup.c | 2 +-
 block/blkdebug.c | 9 +-
 block/blkreplay.c | 2 +-
 block/block-backend.c | 13 +-
 block/curl.c | 44 +++-
 block/gluster.c | 9 +-
 block/io.c | 42 +---
 block/iscsi.c | 15 +-
 block/linux-aio.c | 10 +-
 block/mirror.c | 12 +-
 block/nbd-client.c | 119 +++++----
 block/nfs.c | 9 +-
 block/qcow2-cluster.c | 4 +-
 block/qed-cluster.c | 2 +
 block/qed-table.c | 12 +-
 block/qed.c | 58 +++--
 block/sheepdog.c | 31 +--
 block/ssh.c | 29 +--
 block/throttle-groups.c | 4 +-
 block/win32-aio.c | 9 +-
 dma-helpers.c | 2 +
 hw/9pfs/9p.c | 2 +-
 hw/block/virtio-blk.c | 19 +-
 hw/scsi/scsi-bus.c | 2 +
 hw/scsi/scsi-disk.c | 15 ++
 hw/scsi/scsi-generic.c | 20 +-
 hw/scsi/virtio-scsi.c | 7 +
 io/channel-command.c | 13 +
 io/channel-file.c | 11 +
 io/channel-socket.c | 16 +-
 io/channel-tls.c | 12 +
 io/channel-watch.c | 6 +
 io/channel.c | 97 ++++--
 nbd/client.c | 2 +-
 nbd/common.c | 9 +-
 nbd/server.c | 94 +++----
 stubs/linux-aio.c | 32 +++
 stubs/set-fd-handler.c | 11 -
 tests/iothread.c | 91 +++++++
 tests/test-aio-multithread.c | 463 ++++++++++++++++++++++++++++++++++++
 tests/test-thread-pool.c | 12 +-
 aio-posix.c => util/aio-posix.c | 62 ++---
 aio-win32.c => util/aio-win32.c | 30 +--
 util/aiocb.c | 55 +++++
 async.c => util/async.c | 84 ++++++-
 iohandler.c => util/iohandler.c | 0
 main-loop.c => util/main-loop.c | 0
 util/qemu-coroutine-lock.c | 254 ++++++++++++++++++--
 util/qemu-coroutine-sleep.c | 2 +-
 util/qemu-coroutine.c | 8 +
 qemu-timer.c => util/qemu-timer.c | 0
 thread-pool.c => util/thread-pool.c | 8 +-
 trace-events | 11 -
 util/trace-events | 17 +-
 67 files changed, 1712 insertions(+), 533 deletions(-)
 create mode 100644 tests/iothread.h
 create mode 100644 stubs/linux-aio.c
 create mode 100644 tests/iothread.c
 create mode 100644 tests/test-aio-multithread.c
 rename aio-posix.c => util/aio-posix.c (94%)
 rename aio-win32.c => util/aio-win32.c (95%)
 create mode 100644 util/aiocb.c
 rename async.c => util/async.c (82%)
 rename iohandler.c => util/iohandler.c (100%)
 rename main-loop.c => util/main-loop.c (100%)
 rename qemu-timer.c => util/qemu-timer.c (100%)
 rename thread-pool.c => util/thread-pool.c (97%)

--
2.9.3
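The theme running through this series is that a coroutine should be able to go to sleep on a lock in one IOThread and be restarted there, instead of migrating to whichever thread happens to wake it. The following sketch shows the usage pattern the series is aiming at; it is not code from the pull request, the MyDeviceState and my_device_op names are invented, and qemu_co_mutex_init() is assumed to have been called once at setup:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

/* Hypothetical device state protected by a CoMutex rather than by the
 * AioContext lock.  After this series, the mutex may be contended by
 * coroutines running in different IOThreads. */
typedef struct MyDeviceState {
    CoMutex lock;       /* initialized elsewhere with qemu_co_mutex_init() */
    uint64_t counter;
} MyDeviceState;

static void coroutine_fn my_device_op(void *opaque)
{
    MyDeviceState *s = opaque;

    /* On contention the coroutine yields; when the holder unlocks,
     * aio_co_wake() restarts it on the AioContext it was running on,
     * not on the unlocker's context. */
    qemu_co_mutex_lock(&s->lock);
    s->counter++;
    qemu_co_mutex_unlock(&s->lock);
}

/* Kick off one operation from whichever thread owns @ctx. */
static void my_device_start_op(AioContext *ctx, MyDeviceState *s)
{
    Coroutine *co = qemu_coroutine_create(my_device_op, s);
    aio_co_schedule(ctx, co);   /* introduced by patch 2 of this series */
}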
From: Paolo Bonzini <pbonzini@redhat.com>

AioContext is fairly self-contained; the only dependency is QEMUTimer,
but that in turn doesn't need anything else. So move them out of
block-obj-y to avoid introducing a dependency from io/ to block-obj-y.

main-loop and its dependency iohandler also need to be moved, because
later in this series io/ will call iohandler_get_aio_context.

[Changed copyright "the QEMU team" to "other QEMU contributors" as
suggested by Daniel Berrange and agreed by Paolo.
--Stefan]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213135235.12274-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 Makefile.objs | 4 ---
 stubs/Makefile.objs | 1 +
 tests/Makefile.include | 11 ++++----
 util/Makefile.objs | 6 +++-
 block/io.c | 29 -------------------
 stubs/linux-aio.c | 32 +++++++++++++++++++++
 stubs/set-fd-handler.c | 11 --------
 aio-posix.c => util/aio-posix.c | 2 +-
 aio-win32.c => util/aio-win32.c | 0
 util/aiocb.c | 55 +++++++++++++++++++++++++++++++++++++
 async.c => util/async.c | 3 +-
 iohandler.c => util/iohandler.c | 0
 main-loop.c => util/main-loop.c | 0
 qemu-timer.c => util/qemu-timer.c | 0
 thread-pool.c => util/thread-pool.c | 2 +-
 trace-events | 11 --------
 util/trace-events | 11 ++++++++
 17 files changed, 114 insertions(+), 64 deletions(-)
 create mode 100644 stubs/linux-aio.c
 rename aio-posix.c => util/aio-posix.c (99%)
 rename aio-win32.c => util/aio-win32.c (100%)
 create mode 100644 util/aiocb.c
 rename async.c => util/async.c (99%)
 rename iohandler.c => util/iohandler.c (100%)
 rename main-loop.c => util/main-loop.c (100%)
 rename qemu-timer.c => util/qemu-timer.c (100%)
 rename thread-pool.c => util/thread-pool.c (99%)

diff --git a/Makefile.objs b/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -XXX,XX +XXX,XX @@ chardev-obj-y = chardev/
 #######################################################################
 # block-obj-y is code used by both qemu system emulation and qemu-img

-block-obj-y = async.o thread-pool.o
 block-obj-y += nbd/
 block-obj-y += block.o blockjob.o
-block-obj-y += main-loop.o iohandler.o qemu-timer.o
-block-obj-$(CONFIG_POSIX) += aio-posix.o
-block-obj-$(CONFIG_WIN32) += aio-win32.o
 block-obj-y += block/
 block-obj-y += qemu-io-cmds.o
 block-obj-$(CONFIG_REPLICATION) += replication.o
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -XXX,XX +XXX,XX @@ stub-obj-y += get-vm-name.o
 stub-obj-y += iothread.o
 stub-obj-y += iothread-lock.o
 stub-obj-y += is-daemonized.o
+stub-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
 stub-obj-y += machine-init-done.o
 stub-obj-y += migr-blocker.o
 stub-obj-y += monitor.o
diff --git a/tests/Makefile.include b/tests/Makefile.include
index XXXXXXX..XXXXXXX 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -XXX,XX +XXX,XX @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
 check-unit-y += tests/test-iov$(EXESUF)
 gcov-files-test-iov-y = util/iov.c
 check-unit-y += tests/test-aio$(EXESUF)
+gcov-files-test-aio-y = util/async.c util/qemu-timer.o
+gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c
+gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
 check-unit-y += tests/test-throttle$(EXESUF)
 gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
 gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
@@ -XXX,XX +XXX,XX @@ tests/check-qjson$(EXESUF): tests/check-qjson.o $(test-util-obj-y)
 tests/check-qom-interface$(EXESUF): tests/check-qom-interface.o $(test-qom-obj-y)
 tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)

-tests/test-char$(EXESUF): tests/test-char.o qemu-timer.o \
-	$(test-util-obj-y) $(qtest-obj-y) $(test-block-obj-y) $(chardev-obj-y)
+tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y)
 tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
 tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
 tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
@@ -XXX,XX +XXX,XX @@ tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
 	migration/vmstate.o migration/qemu-file.o \
 	migration/qemu-file-channel.o migration/qjson.o \
 	$(test-io-obj-y)
-tests/test-timed-average$(EXESUF): tests/test-timed-average.o qemu-timer.o \
-	$(test-util-obj-y)
+tests/test-timed-average$(EXESUF): tests/test-timed-average.o $(test-util-obj-y)
 tests/test-base64$(EXESUF): tests/test-base64.o \
 	libqemuutil.a libqemustub.a
 tests/ptimer-test$(EXESUF): tests/ptimer-test.o tests/ptimer-test-stubs.o hw/core/ptimer.o libqemustub.a
@@ -XXX,XX +XXX,XX @@ tests/usb-hcd-ehci-test$(EXESUF): tests/usb-hcd-ehci-test.o $(libqos-usb-obj-y)
 tests/usb-hcd-xhci-test$(EXESUF): tests/usb-hcd-xhci-test.o $(libqos-usb-obj-y)
 tests/pc-cpu-test$(EXESUF): tests/pc-cpu-test.o
 tests/postcopy-test$(EXESUF): tests/postcopy-test.o
-tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o qemu-timer.o \
+tests/vhost-user-test$(EXESUF): tests/vhost-user-test.o $(test-util-obj-y) \
 	$(qtest-obj-y) $(test-io-obj-y) $(libqos-virtio-obj-y) $(libqos-pc-obj-y) \
 	$(chardev-obj-y)
 tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_helper.o
diff --git a/util/Makefile.objs b/util/Makefile.objs
index XXXXXXX..XXXXXXX 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -XXX,XX +XXX,XX @@
 util-obj-y = osdep.o cutils.o unicode.o qemu-timer-common.o
 util-obj-y += bufferiszero.o
 util-obj-y += lockcnt.o
+util-obj-y += aiocb.o async.o thread-pool.o qemu-timer.o
+util-obj-y += main-loop.o iohandler.o
+util-obj-$(CONFIG_POSIX) += aio-posix.o
 util-obj-$(CONFIG_POSIX) += compatfd.o
 util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
 util-obj-$(CONFIG_POSIX) += mmap-alloc.o
 util-obj-$(CONFIG_POSIX) += oslib-posix.o
 util-obj-$(CONFIG_POSIX) += qemu-openpty.o
 util-obj-$(CONFIG_POSIX) += qemu-thread-posix.o
-util-obj-$(CONFIG_WIN32) += event_notifier-win32.o
 util-obj-$(CONFIG_POSIX) += memfd.o
+util-obj-$(CONFIG_WIN32) += aio-win32.o
+util-obj-$(CONFIG_WIN32) += event_notifier-win32.o
 util-obj-$(CONFIG_WIN32) += oslib-win32.o
 util-obj-$(CONFIG_WIN32) += qemu-thread-win32.o
 util-obj-y += envlist.o path.o module.o
diff --git a/block/io.c b/block/io.c
index XXXXXXX..XXXXXXX 100644
--- a/block/io.c
+++ b/block/io.c
@@ -XXX,XX +XXX,XX @@ BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
     return &acb->common;
 }

-void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
-                   BlockCompletionFunc *cb, void *opaque)
-{
-    BlockAIOCB *acb;
-
-    acb = g_malloc(aiocb_info->aiocb_size);
-    acb->aiocb_info = aiocb_info;
-    acb->bs = bs;
-    acb->cb = cb;
-    acb->opaque = opaque;
-    acb->refcnt = 1;
-    return acb;
-}
-
-void qemu_aio_ref(void *p)
-{
-    BlockAIOCB *acb = p;
-    acb->refcnt++;
-}
-
-void qemu_aio_unref(void *p)
-{
-    BlockAIOCB *acb = p;
-    assert(acb->refcnt > 0);
-    if (--acb->refcnt == 0) {
-        g_free(acb);
-    }
-}
-
 /**************************************************************/
 /* Coroutine block device emulation */

diff --git a/stubs/linux-aio.c b/stubs/linux-aio.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/stubs/linux-aio.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Linux native AIO support.
+ *
+ * Copyright (C) 2009 IBM, Corp.
+ * Copyright (C) 2009 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "block/aio.h"
+#include "block/raw-aio.h"
+
+void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
+{
+    abort();
+}
+
+void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
+{
+    abort();
+}
+
+LinuxAioState *laio_init(void)
+{
+    abort();
+}
+
+void laio_cleanup(LinuxAioState *s)
+{
+    abort();
+}
diff --git a/stubs/set-fd-handler.c b/stubs/set-fd-handler.c
index XXXXXXX..XXXXXXX 100644
--- a/stubs/set-fd-handler.c
+++ b/stubs/set-fd-handler.c
@@ -XXX,XX +XXX,XX @@ void qemu_set_fd_handler(int fd,
 {
     abort();
 }
-
-void aio_set_fd_handler(AioContext *ctx,
-                        int fd,
-                        bool is_external,
-                        IOHandler *io_read,
-                        IOHandler *io_write,
-                        AioPollFn *io_poll,
-                        void *opaque)
-{
-    abort();
-}
diff --git a/aio-posix.c b/util/aio-posix.c
similarity index 99%
rename from aio-posix.c
rename to util/aio-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/aio-posix.c
+++ b/util/aio-posix.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/rcu_queue.h"
 #include "qemu/sockets.h"
 #include "qemu/cutils.h"
-#include "trace-root.h"
+#include "trace.h"
 #ifdef CONFIG_EPOLL_CREATE1
 #include <sys/epoll.h>
 #endif
diff --git a/aio-win32.c b/util/aio-win32.c
similarity index 100%
rename from aio-win32.c
rename to util/aio-win32.c
diff --git a/util/aiocb.c b/util/aiocb.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/util/aiocb.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * BlockAIOCB allocation
+ *
+ * Copyright (c) 2003-2017 Fabrice Bellard and other QEMU contributors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "block/aio.h"
+
+void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
+                   BlockCompletionFunc *cb, void *opaque)
+{
+    BlockAIOCB *acb;
+
+    acb = g_malloc(aiocb_info->aiocb_size);
+    acb->aiocb_info = aiocb_info;
+    acb->bs = bs;
+    acb->cb = cb;
+    acb->opaque = opaque;
+    acb->refcnt = 1;
+    return acb;
+}
+
+void qemu_aio_ref(void *p)
+{
+    BlockAIOCB *acb = p;
+    acb->refcnt++;
+}
+
+void qemu_aio_unref(void *p)
+{
+    BlockAIOCB *acb = p;
+    assert(acb->refcnt > 0);
+    if (--acb->refcnt == 0) {
+        g_free(acb);
+    }
+}
diff --git a/async.c b/util/async.c
similarity index 99%
rename from async.c
rename to util/async.c
index XXXXXXX..XXXXXXX 100644
--- a/async.c
+++ b/util/async.c
@@ -XXX,XX +XXX,XX @@
 /*
- * QEMU System Emulator
+ * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
diff --git a/iohandler.c b/util/iohandler.c
similarity index 100%
rename from iohandler.c
rename to util/iohandler.c
diff --git a/main-loop.c b/util/main-loop.c
similarity index 100%
rename from main-loop.c
rename to util/main-loop.c
diff --git a/qemu-timer.c b/util/qemu-timer.c
similarity index 100%
rename from qemu-timer.c
rename to util/qemu-timer.c
diff --git a/thread-pool.c b/util/thread-pool.c
similarity index 99%
rename from thread-pool.c
rename to util/thread-pool.c
index XXXXXXX..XXXXXXX 100644
--- a/thread-pool.c
+++ b/util/thread-pool.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/queue.h"
 #include "qemu/thread.h"
 #include "qemu/coroutine.h"
-#include "trace-root.h"
+#include "trace.h"
 #include "block/thread-pool.h"
 #include "qemu/main-loop.h"

diff --git a/trace-events b/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/trace-events
+++ b/trace-events
@@ -XXX,XX +XXX,XX @@
 #
 # The <format-string> should be a sprintf()-compatible format string.

-# aio-posix.c
-run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
-run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
-poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
-poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
-
-# thread-pool.c
-thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
-thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
-thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
-
 # ioport.c
 cpu_in(unsigned int addr, char size, unsigned int val) "addr %#x(%c) value %u"
 cpu_out(unsigned int addr, char size, unsigned int val) "addr %#x(%c) value %u"
diff --git a/util/trace-events b/util/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -XXX,XX +XXX,XX @@
 # See docs/tracing.txt for syntax documentation.

+# util/aio-posix.c
+run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
+run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
+poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
+
+# util/thread-pool.c
+thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
+thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
+thread_pool_cancel(void *req, void *opaque) "req %p opaque %p"
+
 # util/buffer.c
 buffer_resize(const char *buf, size_t olen, size_t len) "%s: old %zd, new %zd"
 buffer_move_empty(const char *buf, size_t len, const char *from) "%s: %zd bytes from %s"
--
2.9.3
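The only genuinely new file in the patch above is util/aiocb.c, which relocates the BlockAIOCB allocator out of block/io.c so that code linked only against libqemuutil can still allocate AIOCBs. For readers unfamiliar with that API, here is a minimal sketch of how a block driver typically sits on top of qemu_aio_get() and qemu_aio_unref(); the MyAIOCB type and the my_aio_flush function are invented for illustration:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/block.h"

/* BlockAIOCB must be the first member: qemu_aio_get() allocates
 * aiocb_size bytes and generic code treats the result as a BlockAIOCB. */
typedef struct MyAIOCB {
    BlockAIOCB common;
    int ret;
} MyAIOCB;

static const AIOCBInfo my_aiocb_info = {
    .aiocb_size = sizeof(MyAIOCB),
};

static void my_aio_complete(void *opaque)
{
    MyAIOCB *acb = opaque;

    /* Run the caller's completion callback, then drop the reference
     * taken in qemu_aio_get(); refcnt reaching zero frees the ACB. */
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *my_aio_flush(BlockDriverState *bs,
                                BlockCompletionFunc *cb, void *opaque)
{
    MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);

    acb->ret = 0;   /* pretend the flush completed successfully */
    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
                            my_aio_complete, acb);
    return &acb->common;
}

Completion is deferred through a one-shot bottom half here only to keep the sketch self-contained; a real driver would complete from its I/O callback.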
1 | From: Jeuk Kim <jeuk20.kim@samsung.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Universal Flash Storage (UFS) is a high-performance mass storage device | 3 | aio_co_wake provides the infrastructure to start a coroutine on a "home" |
4 | with a serial interface. It is primarily used as a high-performance | 4 | AioContext. It will be used by CoMutex and CoQueue, so that coroutines |
5 | data storage device for embedded applications. | 5 | don't jump from one context to another when they go to sleep on a |
6 | 6 | mutex or waitqueue. However, it can also be used as a more efficient | |
7 | This commit contains code for UFS device to be recognized | 7 | alternative to one-shot bottom halves, and saves the effort of tracking |
8 | as a UFS PCI device. | 8 | which AioContext a coroutine is running on. |
9 | Patches to handle UFS logical unit and Transfer Request will follow. | 9 | |
10 | 10 | aio_co_schedule is the part of aio_co_wake that starts a coroutine | |
11 | Signed-off-by: Jeuk Kim <jeuk20.kim@samsung.com> | 11 | on a remove AioContext, but it is also useful to implement e.g. |
12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 12 | bdrv_set_aio_context callbacks. |
13 | Message-id: 10232660d462ee5cd10cf673f1a9a1205fc8276c.1693980783.git.jeuk20.kim@gmail.com | 13 | |
14 | The implementation of aio_co_schedule is based on a lock-free | ||
15 | multiple-producer, single-consumer queue. The multiple producers use | ||
16 | cmpxchg to add to a LIFO stack. The consumer (a per-AioContext bottom | ||
17 | half) grabs all items added so far, inverts the list to make it FIFO, | ||
18 | and goes through it one item at a time until it's empty. The data | ||
19 | structure was inspired by OSv, which uses it in the very code we'll | ||
20 | "port" to QEMU for the thread-safe CoMutex. | ||
21 | |||
22 | Most of the new code is really tests. | ||
23 | |||
24 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
25 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
26 | Message-id: 20170213135235.12274-3-pbonzini@redhat.com | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 27 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
15 | --- | 28 | --- |
16 | MAINTAINERS | 6 + | 29 | tests/Makefile.include | 8 +- |
17 | docs/specs/pci-ids.rst | 2 + | 30 | include/block/aio.h | 32 +++++++ |
18 | meson.build | 1 + | 31 | include/qemu/coroutine_int.h | 11 ++- |
19 | hw/ufs/trace.h | 1 + | 32 | tests/iothread.h | 25 +++++ |
20 | hw/ufs/ufs.h | 42 ++ | 33 | tests/iothread.c | 91 ++++++++++++++++++ |
21 | include/block/ufs.h | 1090 ++++++++++++++++++++++++++++++++++++++ | 34 | tests/test-aio-multithread.c | 213 +++++++++++++++++++++++++++++++++++++++++++ |
22 | include/hw/pci/pci.h | 1 + | 35 | util/async.c | 65 +++++++++++++ |
23 | include/hw/pci/pci_ids.h | 1 + | 36 | util/qemu-coroutine.c | 8 ++ |
24 | hw/ufs/ufs.c | 278 ++++++++++ | 37 | util/trace-events | 4 + |
25 | hw/Kconfig | 1 + | 38 | 9 files changed, 453 insertions(+), 4 deletions(-) |
26 | hw/meson.build | 1 + | 39 | create mode 100644 tests/iothread.h |
27 | hw/ufs/Kconfig | 4 + | 40 | create mode 100644 tests/iothread.c |
28 | hw/ufs/meson.build | 1 + | 41 | create mode 100644 tests/test-aio-multithread.c |
29 | hw/ufs/trace-events | 32 ++ | 42 | |
30 | 14 files changed, 1461 insertions(+) | 43 | diff --git a/tests/Makefile.include b/tests/Makefile.include |
31 | create mode 100644 hw/ufs/trace.h | ||
32 | create mode 100644 hw/ufs/ufs.h | ||
33 | create mode 100644 include/block/ufs.h | ||
34 | create mode 100644 hw/ufs/ufs.c | ||
35 | create mode 100644 hw/ufs/Kconfig | ||
36 | create mode 100644 hw/ufs/meson.build | ||
37 | create mode 100644 hw/ufs/trace-events | ||
38 | |||
39 | diff --git a/MAINTAINERS b/MAINTAINERS | ||
40 | index XXXXXXX..XXXXXXX 100644 | 44 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/MAINTAINERS | 45 | --- a/tests/Makefile.include |
42 | +++ b/MAINTAINERS | 46 | +++ b/tests/Makefile.include |
43 | @@ -XXX,XX +XXX,XX @@ F: tests/qtest/nvme-test.c | 47 | @@ -XXX,XX +XXX,XX @@ check-unit-y += tests/test-aio$(EXESUF) |
44 | F: docs/system/devices/nvme.rst | 48 | gcov-files-test-aio-y = util/async.c util/qemu-timer.o |
45 | T: git git://git.infradead.org/qemu-nvme.git nvme-next | 49 | gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c |
46 | 50 | gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c | |
47 | +ufs | 51 | +check-unit-y += tests/test-aio-multithread$(EXESUF) |
48 | +M: Jeuk Kim <jeuk20.kim@samsung.com> | 52 | +gcov-files-test-aio-multithread-y = $(gcov-files-test-aio-y) |
49 | +S: Supported | 53 | +gcov-files-test-aio-multithread-y += util/qemu-coroutine.c tests/iothread.c |
50 | +F: hw/ufs/* | 54 | check-unit-y += tests/test-throttle$(EXESUF) |
51 | +F: include/block/ufs.h | 55 | -gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c |
52 | + | 56 | -gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c |
53 | megasas | 57 | check-unit-y += tests/test-thread-pool$(EXESUF) |
54 | M: Hannes Reinecke <hare@suse.com> | 58 | gcov-files-test-thread-pool-y = thread-pool.c |
55 | L: qemu-block@nongnu.org | 59 | gcov-files-test-hbitmap-y = util/hbitmap.c |
56 | diff --git a/docs/specs/pci-ids.rst b/docs/specs/pci-ids.rst | 60 | @@ -XXX,XX +XXX,XX @@ test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \ |
61 | $(test-qom-obj-y) | ||
62 | test-crypto-obj-y = $(crypto-obj-y) $(test-qom-obj-y) | ||
63 | test-io-obj-y = $(io-obj-y) $(test-crypto-obj-y) | ||
64 | -test-block-obj-y = $(block-obj-y) $(test-io-obj-y) | ||
65 | +test-block-obj-y = $(block-obj-y) $(test-io-obj-y) tests/iothread.o | ||
66 | |||
67 | tests/check-qint$(EXESUF): tests/check-qint.o $(test-util-obj-y) | ||
68 | tests/check-qstring$(EXESUF): tests/check-qstring.o $(test-util-obj-y) | ||
69 | @@ -XXX,XX +XXX,XX @@ tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y) | ||
70 | tests/test-char$(EXESUF): tests/test-char.o $(test-util-obj-y) $(qtest-obj-y) $(test-io-obj-y) $(chardev-obj-y) | ||
71 | tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y) | ||
72 | tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y) | ||
73 | +tests/test-aio-multithread$(EXESUF): tests/test-aio-multithread.o $(test-block-obj-y) | ||
74 | tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y) | ||
75 | tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y) | ||
76 | tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y) | ||
77 | diff --git a/include/block/aio.h b/include/block/aio.h | ||
57 | index XXXXXXX..XXXXXXX 100644 | 78 | index XXXXXXX..XXXXXXX 100644 |
58 | --- a/docs/specs/pci-ids.rst | 79 | --- a/include/block/aio.h |
59 | +++ b/docs/specs/pci-ids.rst | 80 | +++ b/include/block/aio.h |
60 | @@ -XXX,XX +XXX,XX @@ PCI devices (other than virtio): | 81 | @@ -XXX,XX +XXX,XX @@ typedef void QEMUBHFunc(void *opaque); |
61 | PCI PVPanic device (``-device pvpanic-pci``) | 82 | typedef bool AioPollFn(void *opaque); |
62 | 1b36:0012 | 83 | typedef void IOHandler(void *opaque); |
63 | PCI ACPI ERST device (``-device acpi-erst``) | 84 | |
64 | +1b36:0013 | 85 | +struct Coroutine; |
65 | + PCI UFS device (``-device ufs``) | 86 | struct ThreadPool; |
66 | 87 | struct LinuxAioState; | |
67 | All these devices are documented in :doc:`index`. | 88 | |
68 | 89 | @@ -XXX,XX +XXX,XX @@ struct AioContext { | |
69 | diff --git a/meson.build b/meson.build | 90 | bool notified; |
91 | EventNotifier notifier; | ||
92 | |||
93 | + QSLIST_HEAD(, Coroutine) scheduled_coroutines; | ||
94 | + QEMUBH *co_schedule_bh; | ||
95 | + | ||
96 | /* Thread pool for performing work and receiving completion callbacks. | ||
97 | * Has its own locking. | ||
98 | */ | ||
99 | @@ -XXX,XX +XXX,XX @@ static inline bool aio_node_check(AioContext *ctx, bool is_external) | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | + * aio_co_schedule: | ||
104 | + * @ctx: the aio context | ||
105 | + * @co: the coroutine | ||
106 | + * | ||
107 | + * Start a coroutine on a remote AioContext. | ||
108 | + * | ||
109 | + * The coroutine must not be entered by anyone else while aio_co_schedule() | ||
110 | + * is active. In addition, the coroutine must have yielded unless ctx | ||
111 | + * is the context in which the coroutine is running (i.e. the value of | ||
112 | + * qemu_get_current_aio_context() from the coroutine itself). | ||
113 | + */ | ||
114 | +void aio_co_schedule(AioContext *ctx, struct Coroutine *co); | ||
115 | + | ||
116 | +/** | ||
117 | + * aio_co_wake: | ||
118 | + * @co: the coroutine | ||
119 | + * | ||
120 | + * Restart a coroutine on the AioContext where it was running last, thus | ||
121 | + * preventing coroutines from jumping from one context to another when they | ||
122 | + * go to sleep. | ||
123 | + * | ||
124 | + * aio_co_wake may be executed either in coroutine or non-coroutine | ||
125 | + * context. The coroutine must not be entered by anyone else while | ||
126 | + * aio_co_wake() is active. | ||
127 | + */ | ||
128 | +void aio_co_wake(struct Coroutine *co); | ||
129 | + | ||
130 | +/** | ||
131 | * Return the AioContext whose event loop runs in the current thread. | ||
132 | * | ||
133 | * If called from an IOThread this will be the IOThread's AioContext. If | ||
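
The two comments above give the contract for the new API; a minimal sketch of the intended usage pattern follows (illustrative only, not part of the patch: SleepState, completion_cb and wait_for_completion are made-up names, while qemu_coroutine_self(), qemu_coroutine_yield() and the new aio_co_wake() are real API):

    /* A coroutine parks itself; a completion callback, possibly running
     * in another thread, wakes it on the AioContext it last ran in. */
    typedef struct {
        Coroutine *co;      /* the coroutine waiting for completion */
        bool done;          /* set by the completion callback */
    } SleepState;

    static void completion_cb(void *opaque)
    {
        SleepState *s = opaque;
        s->done = true;
        aio_co_wake(s->co);             /* restart co on its last context */
    }

    static void coroutine_fn wait_for_completion(SleepState *s)
    {
        s->co = qemu_coroutine_self();
        while (!s->done) {
            qemu_coroutine_yield();     /* must have yielded before the wake */
        }
    }
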
134 | diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h | ||
70 | index XXXXXXX..XXXXXXX 100644 | 135 | index XXXXXXX..XXXXXXX 100644 |
71 | --- a/meson.build | 136 | --- a/include/qemu/coroutine_int.h |
72 | +++ b/meson.build | 137 | +++ b/include/qemu/coroutine_int.h |
73 | @@ -XXX,XX +XXX,XX @@ if have_system | 138 | @@ -XXX,XX +XXX,XX @@ struct Coroutine { |
74 | 'hw/ssi', | 139 | CoroutineEntry *entry; |
75 | 'hw/timer', | 140 | void *entry_arg; |
76 | 'hw/tpm', | 141 | Coroutine *caller; |
77 | + 'hw/ufs', | 142 | + |
78 | 'hw/usb', | 143 | + /* Only used when the coroutine has terminated. */ |
79 | 'hw/vfio', | 144 | QSLIST_ENTRY(Coroutine) pool_next; |
80 | 'hw/virtio', | 145 | + |
81 | diff --git a/hw/ufs/trace.h b/hw/ufs/trace.h | 146 | size_t locks_held; |
147 | |||
148 | - /* Coroutines that should be woken up when we yield or terminate */ | ||
149 | + /* Coroutines that should be woken up when we yield or terminate. | ||
150 | + * Only used when the coroutine is running. | ||
151 | + */ | ||
152 | QSIMPLEQ_HEAD(, Coroutine) co_queue_wakeup; | ||
153 | + | ||
154 | + /* Only used when the coroutine has yielded. */ | ||
155 | + AioContext *ctx; | ||
156 | QSIMPLEQ_ENTRY(Coroutine) co_queue_next; | ||
157 | + QSLIST_ENTRY(Coroutine) co_scheduled_next; | ||
158 | }; | ||
159 | |||
160 | Coroutine *qemu_coroutine_new(void); | ||
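
Given the scheduled_coroutines list and co_schedule_bh added to AioContext in the previous hunk, and the co_scheduled_next link added here, aio_co_schedule() presumably boils down to something like the following rough sketch (the real implementation is in util/async.c, which this excerpt does not show):

    void aio_co_schedule(AioContext *ctx, Coroutine *co)
    {
        /* Safe without ctx's lock: the list head is updated atomically,
         * and co has yielded, so co_scheduled_next is ours to use. */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                                  co, co_scheduled_next);
        /* The BH runs in ctx's thread and re-enters each queued coroutine. */
        qemu_bh_schedule(ctx->co_schedule_bh);
    }
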
161 | diff --git a/tests/iothread.h b/tests/iothread.h | ||
82 | new file mode 100644 | 162 | new file mode 100644 |
83 | index XXXXXXX..XXXXXXX | 163 | index XXXXXXX..XXXXXXX |
84 | --- /dev/null | 164 | --- /dev/null |
85 | +++ b/hw/ufs/trace.h | 165 | +++ b/tests/iothread.h |
86 | @@ -0,0 +1 @@ | 166 | @@ -XXX,XX +XXX,XX @@ |
87 | +#include "trace/trace-hw_ufs.h" | 167 | +/* |
88 | diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h | 168 | + * Event loop thread implementation for unit tests |
169 | + * | ||
170 | + * Copyright Red Hat Inc., 2013, 2016 | ||
171 | + * | ||
172 | + * Authors: | ||
173 | + * Stefan Hajnoczi <stefanha@redhat.com> | ||
174 | + * Paolo Bonzini <pbonzini@redhat.com> | ||
175 | + * | ||
176 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
177 | + * See the COPYING file in the top-level directory. | ||
178 | + */ | ||
179 | +#ifndef TEST_IOTHREAD_H | ||
180 | +#define TEST_IOTHREAD_H | ||
181 | + | ||
182 | +#include "block/aio.h" | ||
183 | +#include "qemu/thread.h" | ||
184 | + | ||
185 | +typedef struct IOThread IOThread; | ||
186 | + | ||
187 | +IOThread *iothread_new(void); | ||
188 | +void iothread_join(IOThread *iothread); | ||
189 | +AioContext *iothread_get_aio_context(IOThread *iothread); | ||
190 | + | ||
191 | +#endif | ||
192 | diff --git a/tests/iothread.c b/tests/iothread.c | ||
89 | new file mode 100644 | 193 | new file mode 100644 |
90 | index XXXXXXX..XXXXXXX | 194 | index XXXXXXX..XXXXXXX |
91 | --- /dev/null | 195 | --- /dev/null |
92 | +++ b/hw/ufs/ufs.h | 196 | +++ b/tests/iothread.c |
93 | @@ -XXX,XX +XXX,XX @@ | 197 | @@ -XXX,XX +XXX,XX @@ |
94 | +/* | 198 | +/* |
95 | + * QEMU UFS | 199 | + * Event loop thread implementation for unit tests |
96 | + * | 200 | + * |
97 | + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved. | 201 | + * Copyright Red Hat Inc., 2013, 2016 |
98 | + * | 202 | + * |
99 | + * Written by Jeuk Kim <jeuk20.kim@samsung.com> | 203 | + * Authors: |
100 | + * | 204 | + * Stefan Hajnoczi <stefanha@redhat.com> |
101 | + * SPDX-License-Identifier: GPL-2.0-or-later | 205 | + * Paolo Bonzini <pbonzini@redhat.com> |
206 | + * | ||
207 | + * This work is licensed under the terms of the GNU GPL, version 2 or later. | ||
208 | + * See the COPYING file in the top-level directory. | ||
209 | + * | ||
102 | + */ | 210 | + */ |
103 | + | 211 | + |
104 | +#ifndef HW_UFS_UFS_H | 212 | +#include "qemu/osdep.h" |
105 | +#define HW_UFS_UFS_H | 213 | +#include "qapi/error.h" |
106 | + | 214 | +#include "block/aio.h" |
107 | +#include "hw/pci/pci_device.h" | 215 | +#include "qemu/main-loop.h" |
108 | +#include "hw/scsi/scsi.h" | 216 | +#include "qemu/rcu.h" |
109 | +#include "block/ufs.h" | 217 | +#include "iothread.h" |
110 | + | 218 | + |
111 | +#define UFS_MAX_LUS 32 | 219 | +struct IOThread { |
112 | +#define UFS_BLOCK_SIZE 4096 | 220 | + AioContext *ctx; |
113 | + | 221 | + |
114 | +typedef struct UfsParams { | 222 | + QemuThread thread; |
115 | + char *serial; | 223 | + QemuMutex init_done_lock; |
116 | + uint8_t nutrs; /* Number of UTP Transfer Request Slots */ | 224 | + QemuCond init_done_cond; /* is thread initialization done? */ |
117 | + uint8_t nutmrs; /* Number of UTP Task Management Request Slots */ | 225 | + bool stopping; |
118 | +} UfsParams; | 226 | +}; |
119 | + | 227 | + |
120 | +typedef struct UfsHc { | 228 | +static __thread IOThread *my_iothread; |
121 | + PCIDevice parent_obj; | 229 | + |
122 | + MemoryRegion iomem; | 230 | +AioContext *qemu_get_current_aio_context(void) |
123 | + UfsReg reg; | 231 | +{ |
124 | + UfsParams params; | 232 | + return my_iothread ? my_iothread->ctx : qemu_get_aio_context(); |
125 | + uint32_t reg_size; | 233 | +} |
126 | + | 234 | + |
127 | + qemu_irq irq; | 235 | +static void *iothread_run(void *opaque) |
128 | + QEMUBH *doorbell_bh; | 236 | +{ |
129 | + QEMUBH *complete_bh; | 237 | + IOThread *iothread = opaque; |
130 | +} UfsHc; | 238 | + |
131 | + | 239 | + rcu_register_thread(); |
132 | +#define TYPE_UFS "ufs" | 240 | + |
133 | +#define UFS(obj) OBJECT_CHECK(UfsHc, (obj), TYPE_UFS) | 241 | + my_iothread = iothread; |
134 | + | 242 | + qemu_mutex_lock(&iothread->init_done_lock); |
135 | +#endif /* HW_UFS_UFS_H */ | 243 | + iothread->ctx = aio_context_new(&error_abort); |
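
With the register file and the qemu_irq in UfsHc above, interrupt delivery in hw/ufs/ufs.c can be expected to follow the usual level-triggered pattern; a hypothetical sketch (ufs_irq_check is a guessed name; UFS_INTR_MASK comes from include/block/ufs.h below):

    static void ufs_irq_check(UfsHc *u)
    {
        /* Raise the interrupt while any enabled status bit is set. */
        if (u->reg.is & u->reg.ie & UFS_INTR_MASK) {
            qemu_irq_raise(u->irq);
        } else {
            qemu_irq_lower(u->irq);
        }
    }
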
136 | diff --git a/include/block/ufs.h b/include/block/ufs.h | 244 | + qemu_cond_signal(&iothread->init_done_cond); |
245 | + qemu_mutex_unlock(&iothread->init_done_lock); | ||
246 | + | ||
247 | + while (!atomic_read(&iothread->stopping)) { | ||
248 | + aio_poll(iothread->ctx, true); | ||
249 | + } | ||
250 | + | ||
251 | + rcu_unregister_thread(); | ||
252 | + return NULL; | ||
253 | +} | ||
254 | + | ||
255 | +void iothread_join(IOThread *iothread) | ||
256 | +{ | ||
257 | + iothread->stopping = true; | ||
258 | + aio_notify(iothread->ctx); | ||
259 | + qemu_thread_join(&iothread->thread); | ||
260 | + qemu_cond_destroy(&iothread->init_done_cond); | ||
261 | + qemu_mutex_destroy(&iothread->init_done_lock); | ||
262 | + aio_context_unref(iothread->ctx); | ||
263 | + g_free(iothread); | ||
264 | +} | ||
265 | + | ||
266 | +IOThread *iothread_new(void) | ||
267 | +{ | ||
268 | + IOThread *iothread = g_new0(IOThread, 1); | ||
269 | + | ||
270 | + qemu_mutex_init(&iothread->init_done_lock); | ||
271 | + qemu_cond_init(&iothread->init_done_cond); | ||
272 | + qemu_thread_create(&iothread->thread, NULL, iothread_run, | ||
273 | + iothread, QEMU_THREAD_JOINABLE); | ||
274 | + | ||
275 | + /* Wait for initialization to complete */ | ||
276 | + qemu_mutex_lock(&iothread->init_done_lock); | ||
277 | + while (iothread->ctx == NULL) { | ||
278 | + qemu_cond_wait(&iothread->init_done_cond, | ||
279 | + &iothread->init_done_lock); | ||
280 | + } | ||
281 | + qemu_mutex_unlock(&iothread->init_done_lock); | ||
282 | + return iothread; | ||
283 | +} | ||
284 | + | ||
285 | +AioContext *iothread_get_aio_context(IOThread *iothread) | ||
286 | +{ | ||
287 | + return iothread->ctx; | ||
288 | +} | ||
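
Typical use of this helper from a test body would look roughly like this (a hypothetical fragment; my_cb and my_opaque stand in for a real callback and its argument):

    IOThread *t = iothread_new();
    AioContext *ctx = iothread_get_aio_context(t);

    /* Run work inside the event loop thread. */
    aio_bh_schedule_oneshot(ctx, my_cb, my_opaque);

    iothread_join(t);   /* stop the loop, join the thread, free t */
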
289 | diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c | ||
137 | new file mode 100644 | 290 | new file mode 100644 |
138 | index XXXXXXX..XXXXXXX | 291 | index XXXXXXX..XXXXXXX |
139 | --- /dev/null | 292 | --- /dev/null |
140 | +++ b/include/block/ufs.h | 293 | +++ b/tests/test-aio-multithread.c |
141 | @@ -XXX,XX +XXX,XX @@ | ||
142 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
143 | + | ||
144 | +#ifndef BLOCK_UFS_H | ||
145 | +#define BLOCK_UFS_H | ||
146 | + | ||
147 | +#include "hw/registerfields.h" | ||
148 | + | ||
149 | +typedef struct QEMU_PACKED UfsReg { | ||
150 | + uint32_t cap; | ||
151 | + uint32_t rsvd0; | ||
152 | + uint32_t ver; | ||
153 | + uint32_t rsvd1; | ||
154 | + uint32_t hcpid; | ||
155 | + uint32_t hcmid; | ||
156 | + uint32_t ahit; | ||
157 | + uint32_t rsvd2; | ||
158 | + uint32_t is; | ||
159 | + uint32_t ie; | ||
160 | + uint32_t rsvd3[2]; | ||
161 | + uint32_t hcs; | ||
162 | + uint32_t hce; | ||
163 | + uint32_t uecpa; | ||
164 | + uint32_t uecdl; | ||
165 | + uint32_t uecn; | ||
166 | + uint32_t uect; | ||
167 | + uint32_t uecdme; | ||
168 | + uint32_t utriacr; | ||
169 | + uint32_t utrlba; | ||
170 | + uint32_t utrlbau; | ||
171 | + uint32_t utrldbr; | ||
172 | + uint32_t utrlclr; | ||
173 | + uint32_t utrlrsr; | ||
174 | + uint32_t utrlcnr; | ||
175 | + uint32_t rsvd4[2]; | ||
176 | + uint32_t utmrlba; | ||
177 | + uint32_t utmrlbau; | ||
178 | + uint32_t utmrldbr; | ||
179 | + uint32_t utmrlclr; | ||
180 | + uint32_t utmrlrsr; | ||
181 | + uint32_t rsvd5[3]; | ||
182 | + uint32_t uiccmd; | ||
183 | + uint32_t ucmdarg1; | ||
184 | + uint32_t ucmdarg2; | ||
185 | + uint32_t ucmdarg3; | ||
186 | + uint32_t rsvd6[4]; | ||
187 | + uint32_t rsvd7[4]; | ||
188 | + uint32_t rsvd8[16]; | ||
189 | + uint32_t ccap; | ||
190 | +} UfsReg; | ||
191 | + | ||
192 | +REG32(CAP, offsetof(UfsReg, cap)) | ||
193 | + FIELD(CAP, NUTRS, 0, 5) | ||
194 | + FIELD(CAP, RTT, 8, 8) | ||
195 | + FIELD(CAP, NUTMRS, 16, 3) | ||
196 | + FIELD(CAP, AUTOH8, 23, 1) | ||
197 | + FIELD(CAP, 64AS, 24, 1) | ||
198 | + FIELD(CAP, OODDS, 25, 1) | ||
199 | + FIELD(CAP, UICDMETMS, 26, 1) | ||
200 | + FIELD(CAP, CS, 28, 1) | ||
201 | +REG32(VER, offsetof(UfsReg, ver)) | ||
202 | +REG32(HCPID, offsetof(UfsReg, hcpid)) | ||
203 | +REG32(HCMID, offsetof(UfsReg, hcmid)) | ||
204 | +REG32(AHIT, offsetof(UfsReg, ahit)) | ||
205 | +REG32(IS, offsetof(UfsReg, is)) | ||
206 | + FIELD(IS, UTRCS, 0, 1) | ||
207 | + FIELD(IS, UDEPRI, 1, 1) | ||
208 | + FIELD(IS, UE, 2, 1) | ||
209 | + FIELD(IS, UTMS, 3, 1) | ||
210 | + FIELD(IS, UPMS, 4, 1) | ||
211 | + FIELD(IS, UHXS, 5, 1) | ||
212 | + FIELD(IS, UHES, 6, 1) | ||
213 | + FIELD(IS, ULLS, 7, 1) | ||
214 | + FIELD(IS, ULSS, 8, 1) | ||
215 | + FIELD(IS, UTMRCS, 9, 1) | ||
216 | + FIELD(IS, UCCS, 10, 1) | ||
217 | + FIELD(IS, DFES, 11, 1) | ||
218 | + FIELD(IS, UTPES, 12, 1) | ||
219 | + FIELD(IS, HCFES, 16, 1) | ||
220 | + FIELD(IS, SBFES, 17, 1) | ||
221 | + FIELD(IS, CEFES, 18, 1) | ||
222 | +REG32(IE, offsetof(UfsReg, ie)) | ||
223 | + FIELD(IE, UTRCE, 0, 1) | ||
224 | + FIELD(IE, UDEPRIE, 1, 1) | ||
225 | + FIELD(IE, UEE, 2, 1) | ||
226 | + FIELD(IE, UTMSE, 3, 1) | ||
227 | + FIELD(IE, UPMSE, 4, 1) | ||
228 | + FIELD(IE, UHXSE, 5, 1) | ||
229 | + FIELD(IE, UHESE, 6, 1) | ||
230 | + FIELD(IE, ULLSE, 7, 1) | ||
231 | + FIELD(IE, ULSSE, 8, 1) | ||
232 | + FIELD(IE, UTMRCE, 9, 1) | ||
233 | + FIELD(IE, UCCE, 10, 1) | ||
234 | + FIELD(IE, DFEE, 11, 1) | ||
235 | + FIELD(IE, UTPEE, 12, 1) | ||
236 | + FIELD(IE, HCFEE, 16, 1) | ||
237 | + FIELD(IE, SBFEE, 17, 1) | ||
238 | + FIELD(IE, CEFEE, 18, 1) | ||
239 | +REG32(HCS, offsetof(UfsReg, hcs)) | ||
240 | + FIELD(HCS, DP, 0, 1) | ||
241 | + FIELD(HCS, UTRLRDY, 1, 1) | ||
242 | + FIELD(HCS, UTMRLRDY, 2, 1) | ||
243 | + FIELD(HCS, UCRDY, 3, 1) | ||
244 | + FIELD(HCS, UPMCRS, 8, 3) | ||
245 | +REG32(HCE, offsetof(UfsReg, hce)) | ||
246 | + FIELD(HCE, HCE, 0, 1) | ||
247 | + FIELD(HCE, CGE, 1, 1) | ||
248 | +REG32(UECPA, offsetof(UfsReg, uecpa)) | ||
249 | +REG32(UECDL, offsetof(UfsReg, uecdl)) | ||
250 | +REG32(UECN, offsetof(UfsReg, uecn)) | ||
251 | +REG32(UECT, offsetof(UfsReg, uect)) | ||
252 | +REG32(UECDME, offsetof(UfsReg, uecdme)) | ||
253 | +REG32(UTRIACR, offsetof(UfsReg, utriacr)) | ||
254 | +REG32(UTRLBA, offsetof(UfsReg, utrlba)) | ||
255 | + FIELD(UTRLBA, UTRLBA, 9, 22) | ||
256 | +REG32(UTRLBAU, offsetof(UfsReg, utrlbau)) | ||
257 | +REG32(UTRLDBR, offsetof(UfsReg, utrldbr)) | ||
258 | +REG32(UTRLCLR, offsetof(UfsReg, utrlclr)) | ||
259 | +REG32(UTRLRSR, offsetof(UfsReg, utrlrsr)) | ||
260 | +REG32(UTRLCNR, offsetof(UfsReg, utrlcnr)) | ||
261 | +REG32(UTMRLBA, offsetof(UfsReg, utmrlba)) | ||
262 | + FIELD(UTMRLBA, UTMRLBA, 9, 22) | ||
263 | +REG32(UTMRLBAU, offsetof(UfsReg, utmrlbau)) | ||
264 | +REG32(UTMRLDBR, offsetof(UfsReg, utmrldbr)) | ||
265 | +REG32(UTMRLCLR, offsetof(UfsReg, utmrlclr)) | ||
266 | +REG32(UTMRLRSR, offsetof(UfsReg, utmrlrsr)) | ||
267 | +REG32(UICCMD, offsetof(UfsReg, uiccmd)) | ||
268 | +REG32(UCMDARG1, offsetof(UfsReg, ucmdarg1)) | ||
269 | +REG32(UCMDARG2, offsetof(UfsReg, ucmdarg2)) | ||
270 | +REG32(UCMDARG3, offsetof(UfsReg, ucmdarg3)) | ||
271 | +REG32(CCAP, offsetof(UfsReg, ccap)) | ||
272 | + | ||
273 | +#define UFS_INTR_MASK \ | ||
274 | + ((1 << R_IS_CEFES_SHIFT) | (1 << R_IS_SBFES_SHIFT) | \ | ||
275 | + (1 << R_IS_HCFES_SHIFT) | (1 << R_IS_UTPES_SHIFT) | \ | ||
276 | + (1 << R_IS_DFES_SHIFT) | (1 << R_IS_UCCS_SHIFT) | \ | ||
277 | + (1 << R_IS_UTMRCS_SHIFT) | (1 << R_IS_ULSS_SHIFT) | \ | ||
278 | + (1 << R_IS_ULLS_SHIFT) | (1 << R_IS_UHES_SHIFT) | \ | ||
279 | + (1 << R_IS_UHXS_SHIFT) | (1 << R_IS_UPMS_SHIFT) | \ | ||
280 | + (1 << R_IS_UTMS_SHIFT) | (1 << R_IS_UE_SHIFT) | \ | ||
281 | + (1 << R_IS_UDEPRI_SHIFT) | (1 << R_IS_UTRCS_SHIFT)) | ||
282 | + | ||
283 | +#define UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT 24 | ||
284 | +#define UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK 0xff | ||
285 | +#define UFS_UPIU_HEADER_TRANSACTION_TYPE(dword0) \ | ||
286 | + ((be32_to_cpu(dword0) >> UFS_UPIU_HEADER_TRANSACTION_TYPE_SHIFT) & \ | ||
287 | + UFS_UPIU_HEADER_TRANSACTION_TYPE_MASK) | ||
288 | + | ||
289 | +#define UFS_UPIU_HEADER_QUERY_FUNC_SHIFT 16 | ||
290 | +#define UFS_UPIU_HEADER_QUERY_FUNC_MASK 0xff | ||
291 | +#define UFS_UPIU_HEADER_QUERY_FUNC(dword1) \ | ||
292 | + ((be32_to_cpu(dword1) >> UFS_UPIU_HEADER_QUERY_FUNC_SHIFT) & \ | ||
293 | + UFS_UPIU_HEADER_QUERY_FUNC_MASK) | ||
294 | + | ||
295 | +#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT 0 | ||
296 | +#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK 0xffff | ||
297 | +#define UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH(dword2) \ | ||
298 | + ((be32_to_cpu(dword2) >> UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_SHIFT) & \ | ||
299 | + UFS_UPIU_HEADER_DATA_SEGMENT_LENGTH_MASK) | ||
300 | + | ||
301 | +typedef struct QEMU_PACKED DeviceDescriptor { | ||
302 | + uint8_t length; | ||
303 | + uint8_t descriptor_idn; | ||
304 | + uint8_t device; | ||
305 | + uint8_t device_class; | ||
306 | + uint8_t device_sub_class; | ||
307 | + uint8_t protocol; | ||
308 | + uint8_t number_lu; | ||
309 | + uint8_t number_wlu; | ||
310 | + uint8_t boot_enable; | ||
311 | + uint8_t descr_access_en; | ||
312 | + uint8_t init_power_mode; | ||
313 | + uint8_t high_priority_lun; | ||
314 | + uint8_t secure_removal_type; | ||
315 | + uint8_t security_lu; | ||
316 | + uint8_t background_ops_term_lat; | ||
317 | + uint8_t init_active_icc_level; | ||
318 | + uint16_t spec_version; | ||
319 | + uint16_t manufacture_date; | ||
320 | + uint8_t manufacturer_name; | ||
321 | + uint8_t product_name; | ||
322 | + uint8_t serial_number; | ||
323 | + uint8_t oem_id; | ||
324 | + uint16_t manufacturer_id; | ||
325 | + uint8_t ud_0_base_offset; | ||
326 | + uint8_t ud_config_p_length; | ||
327 | + uint8_t device_rtt_cap; | ||
328 | + uint16_t periodic_rtc_update; | ||
329 | + uint8_t ufs_features_support; | ||
330 | + uint8_t ffu_timeout; | ||
331 | + uint8_t queue_depth; | ||
332 | + uint16_t device_version; | ||
333 | + uint8_t num_secure_wp_area; | ||
334 | + uint32_t psa_max_data_size; | ||
335 | + uint8_t psa_state_timeout; | ||
336 | + uint8_t product_revision_level; | ||
337 | + uint8_t reserved[36]; | ||
338 | + uint32_t extended_ufs_features_support; | ||
339 | + uint8_t write_booster_buffer_preserve_user_space_en; | ||
340 | + uint8_t write_booster_buffer_type; | ||
341 | + uint32_t num_shared_write_booster_buffer_alloc_units; | ||
342 | +} DeviceDescriptor; | ||
343 | + | ||
344 | +typedef struct QEMU_PACKED GeometryDescriptor { | ||
345 | + uint8_t length; | ||
346 | + uint8_t descriptor_idn; | ||
347 | + uint8_t media_technology; | ||
348 | + uint8_t reserved; | ||
349 | + uint64_t total_raw_device_capacity; | ||
350 | + uint8_t max_number_lu; | ||
351 | + uint32_t segment_size; | ||
352 | + uint8_t allocation_unit_size; | ||
353 | + uint8_t min_addr_block_size; | ||
354 | + uint8_t optimal_read_block_size; | ||
355 | + uint8_t optimal_write_block_size; | ||
356 | + uint8_t max_in_buffer_size; | ||
357 | + uint8_t max_out_buffer_size; | ||
358 | + uint8_t rpmb_read_write_size; | ||
359 | + uint8_t dynamic_capacity_resource_policy; | ||
360 | + uint8_t data_ordering; | ||
361 | + uint8_t max_context_id_number; | ||
362 | + uint8_t sys_data_tag_unit_size; | ||
363 | + uint8_t sys_data_tag_res_size; | ||
364 | + uint8_t supported_sec_r_types; | ||
365 | + uint16_t supported_memory_types; | ||
366 | + uint32_t system_code_max_n_alloc_u; | ||
367 | + uint16_t system_code_cap_adj_fac; | ||
368 | + uint32_t non_persist_max_n_alloc_u; | ||
369 | + uint16_t non_persist_cap_adj_fac; | ||
370 | + uint32_t enhanced_1_max_n_alloc_u; | ||
371 | + uint16_t enhanced_1_cap_adj_fac; | ||
372 | + uint32_t enhanced_2_max_n_alloc_u; | ||
373 | + uint16_t enhanced_2_cap_adj_fac; | ||
374 | + uint32_t enhanced_3_max_n_alloc_u; | ||
375 | + uint16_t enhanced_3_cap_adj_fac; | ||
376 | + uint32_t enhanced_4_max_n_alloc_u; | ||
377 | + uint16_t enhanced_4_cap_adj_fac; | ||
378 | + uint32_t optimal_logical_block_size; | ||
379 | + uint8_t reserved2[7]; | ||
380 | + uint32_t write_booster_buffer_max_n_alloc_units; | ||
381 | + uint8_t device_max_write_booster_l_us; | ||
382 | + uint8_t write_booster_buffer_cap_adj_fac; | ||
383 | + uint8_t supported_write_booster_buffer_user_space_reduction_types; | ||
384 | + uint8_t supported_write_booster_buffer_types; | ||
385 | +} GeometryDescriptor; | ||
386 | + | ||
387 | +#define UFS_GEOMETRY_CAPACITY_SHIFT 9 | ||
388 | + | ||
389 | +typedef struct QEMU_PACKED UnitDescriptor { | ||
390 | + uint8_t length; | ||
391 | + uint8_t descriptor_idn; | ||
392 | + uint8_t unit_index; | ||
393 | + uint8_t lu_enable; | ||
394 | + uint8_t boot_lun_id; | ||
395 | + uint8_t lu_write_protect; | ||
396 | + uint8_t lu_queue_depth; | ||
397 | + uint8_t psa_sensitive; | ||
398 | + uint8_t memory_type; | ||
399 | + uint8_t data_reliability; | ||
400 | + uint8_t logical_block_size; | ||
401 | + uint64_t logical_block_count; | ||
402 | + uint32_t erase_block_size; | ||
403 | + uint8_t provisioning_type; | ||
404 | + uint64_t phy_mem_resource_count; | ||
405 | + uint16_t context_capabilities; | ||
406 | + uint8_t large_unit_granularity_m1; | ||
407 | + uint8_t reserved[6]; | ||
408 | + uint32_t lu_num_write_booster_buffer_alloc_units; | ||
409 | +} UnitDescriptor; | ||
410 | + | ||
411 | +typedef struct QEMU_PACKED RpmbUnitDescriptor { | ||
412 | + uint8_t length; | ||
413 | + uint8_t descriptor_idn; | ||
414 | + uint8_t unit_index; | ||
415 | + uint8_t lu_enable; | ||
416 | + uint8_t boot_lun_id; | ||
417 | + uint8_t lu_write_protect; | ||
418 | + uint8_t lu_queue_depth; | ||
419 | + uint8_t psa_sensitive; | ||
420 | + uint8_t memory_type; | ||
421 | + uint8_t reserved; | ||
422 | + uint8_t logical_block_size; | ||
423 | + uint64_t logical_block_count; | ||
424 | + uint32_t erase_block_size; | ||
425 | + uint8_t provisioning_type; | ||
426 | + uint64_t phy_mem_resource_count; | ||
427 | + uint8_t reserved2[3]; | ||
428 | +} RpmbUnitDescriptor; | ||
429 | + | ||
430 | +typedef struct QEMU_PACKED PowerParametersDescriptor { | ||
431 | + uint8_t length; | ||
432 | + uint8_t descriptor_idn; | ||
433 | + uint16_t active_icc_levels_vcc[16]; | ||
434 | + uint16_t active_icc_levels_vccq[16]; | ||
435 | + uint16_t active_icc_levels_vccq_2[16]; | ||
436 | +} PowerParametersDescriptor; | ||
437 | + | ||
438 | +typedef struct QEMU_PACKED InterconnectDescriptor { | ||
439 | + uint8_t length; | ||
440 | + uint8_t descriptor_idn; | ||
441 | + uint16_t bcd_unipro_version; | ||
442 | + uint16_t bcd_mphy_version; | ||
443 | +} InterconnectDescriptor; | ||
444 | + | ||
445 | +typedef struct QEMU_PACKED StringDescriptor { | ||
446 | + uint8_t length; | ||
447 | + uint8_t descriptor_idn; | ||
448 | + uint16_t UC[126]; | ||
449 | +} StringDescriptor; | ||
450 | + | ||
451 | +typedef struct QEMU_PACKED DeviceHealthDescriptor { | ||
452 | + uint8_t length; | ||
453 | + uint8_t descriptor_idn; | ||
454 | + uint8_t pre_eol_info; | ||
455 | + uint8_t device_life_time_est_a; | ||
456 | + uint8_t device_life_time_est_b; | ||
457 | + uint8_t vendor_prop_info[32]; | ||
458 | + uint32_t refresh_total_count; | ||
459 | + uint32_t refresh_progress; | ||
460 | +} DeviceHealthDescriptor; | ||
461 | + | ||
462 | +typedef struct QEMU_PACKED Flags { | ||
463 | + uint8_t reserved; | ||
464 | + uint8_t device_init; | ||
465 | + uint8_t permanent_wp_en; | ||
466 | + uint8_t power_on_wp_en; | ||
467 | + uint8_t background_ops_en; | ||
468 | + uint8_t device_life_span_mode_en; | ||
469 | + uint8_t purge_enable; | ||
470 | + uint8_t refresh_enable; | ||
471 | + uint8_t phy_resource_removal; | ||
472 | + uint8_t busy_rtc; | ||
473 | + uint8_t reserved2; | ||
474 | + uint8_t permanently_disable_fw_update; | ||
475 | + uint8_t reserved3[2]; | ||
476 | + uint8_t wb_en; | ||
477 | + uint8_t wb_buffer_flush_en; | ||
478 | + uint8_t wb_buffer_flush_during_hibernate; | ||
479 | + uint8_t reserved4[2]; | ||
480 | +} Flags; | ||
481 | + | ||
482 | +typedef struct Attributes { | ||
483 | + uint8_t boot_lun_en; | ||
484 | + uint8_t reserved; | ||
485 | + uint8_t current_power_mode; | ||
486 | + uint8_t active_icc_level; | ||
487 | + uint8_t out_of_order_data_en; | ||
488 | + uint8_t background_op_status; | ||
489 | + uint8_t purge_status; | ||
490 | + uint8_t max_data_in_size; | ||
491 | + uint8_t max_data_out_size; | ||
492 | + uint32_t dyn_cap_needed; | ||
493 | + uint8_t ref_clk_freq; | ||
494 | + uint8_t config_descr_lock; | ||
495 | + uint8_t max_num_of_rtt; | ||
496 | + uint16_t exception_event_control; | ||
497 | + uint16_t exception_event_status; | ||
498 | + uint32_t seconds_passed; | ||
499 | + uint16_t context_conf; | ||
500 | + uint8_t device_ffu_status; | ||
501 | + uint8_t psa_state; | ||
502 | + uint32_t psa_data_size; | ||
503 | + uint8_t ref_clk_gating_wait_time; | ||
504 | + uint8_t device_case_rough_temperaure; | ||
505 | + uint8_t device_too_high_temp_boundary; | ||
506 | + uint8_t device_too_low_temp_boundary; | ||
507 | + uint8_t throttling_status; | ||
508 | + uint8_t wb_buffer_flush_status; | ||
509 | + uint8_t available_wb_buffer_size; | ||
510 | + uint8_t wb_buffer_life_time_est; | ||
511 | + uint32_t current_wb_buffer_size; | ||
512 | + uint8_t refresh_status; | ||
513 | + uint8_t refresh_freq; | ||
514 | + uint8_t refresh_unit; | ||
515 | + uint8_t refresh_method; | ||
516 | +} Attributes; | ||
517 | + | ||
518 | +#define UFS_TRANSACTION_SPECIFIC_FIELD_SIZE 20 | ||
519 | +#define UFS_MAX_QUERY_DATA_SIZE 256 | ||
520 | + | ||
521 | +/* Command response result code */ | ||
522 | +typedef enum CommandRespCode { | ||
523 | + UFS_COMMAND_RESULT_SUCESS = 0x00, | ||
524 | + UFS_COMMAND_RESULT_FAIL = 0x01, | ||
525 | +} CommandRespCode; | ||
526 | + | ||
527 | +enum { | ||
528 | + UFS_UPIU_FLAG_UNDERFLOW = 0x20, | ||
529 | + UFS_UPIU_FLAG_OVERFLOW = 0x40, | ||
530 | +}; | ||
531 | + | ||
532 | +typedef struct QEMU_PACKED UtpUpiuHeader { | ||
533 | + uint8_t trans_type; | ||
534 | + uint8_t flags; | ||
535 | + uint8_t lun; | ||
536 | + uint8_t task_tag; | ||
537 | + uint8_t iid_cmd_set_type; | ||
538 | + uint8_t query_func; | ||
539 | + uint8_t response; | ||
540 | + uint8_t scsi_status; | ||
541 | + uint8_t ehs_len; | ||
542 | + uint8_t device_inf; | ||
543 | + uint16_t data_segment_length; | ||
544 | +} UtpUpiuHeader; | ||
545 | + | ||
546 | +/* | ||
547 | + * The code below is copied from the Linux kernel | ||
548 | + * ("include/uapi/scsi/scsi_bsg_ufs.h") and modified to fit the QEMU style. | ||
549 | + */ | ||
550 | + | ||
551 | +typedef struct QEMU_PACKED UtpUpiuQuery { | ||
552 | + uint8_t opcode; | ||
553 | + uint8_t idn; | ||
554 | + uint8_t index; | ||
555 | + uint8_t selector; | ||
556 | + uint16_t reserved_osf; | ||
557 | + uint16_t length; | ||
558 | + uint32_t value; | ||
559 | + uint32_t reserved[2]; | ||
560 | + /* EHS length should be 0. We don't have to worry about EHS area. */ | ||
561 | + uint8_t data[UFS_MAX_QUERY_DATA_SIZE]; | ||
562 | +} UtpUpiuQuery; | ||
563 | + | ||
564 | +#define UFS_CDB_SIZE 16 | ||
565 | + | ||
566 | +/* | ||
567 | + * struct UtpUpiuCmd - Command UPIU structure | ||
568 | + * @data_transfer_len: Data Transfer Length DW-3 | ||
569 | + * @cdb: Command Descriptor Block CDB DW-4 to DW-7 | ||
570 | + */ | ||
571 | +typedef struct QEMU_PACKED UtpUpiuCmd { | ||
572 | + uint32_t exp_data_transfer_len; | ||
573 | + uint8_t cdb[UFS_CDB_SIZE]; | ||
574 | +} UtpUpiuCmd; | ||
575 | + | ||
576 | +/* | ||
577 | + * struct UtpUpiuReq - general upiu request structure | ||
578 | + * @header: UPIU header structure DW-0 to DW-2 | ||
579 | + * @sc: fields structure for scsi command DW-3 to DW-7 | ||
580 | + * @qr: fields structure for query request DW-3 to DW-7 | ||
582 | + */ | ||
583 | +typedef struct QEMU_PACKED UtpUpiuReq { | ||
584 | + UtpUpiuHeader header; | ||
585 | + union { | ||
586 | + UtpUpiuCmd sc; | ||
587 | + UtpUpiuQuery qr; | ||
588 | + }; | ||
589 | +} UtpUpiuReq; | ||
590 | + | ||
591 | +/* | ||
592 | + * The code below is copied from the Linux kernel ("include/ufs/ufshci.h") and | ||
593 | + * modified to fit the QEMU style. | ||
594 | + */ | ||
595 | + | ||
596 | +enum { | ||
597 | + UFS_PWR_OK = 0x0, | ||
598 | + UFS_PWR_LOCAL = 0x01, | ||
599 | + UFS_PWR_REMOTE = 0x02, | ||
600 | + UFS_PWR_BUSY = 0x03, | ||
601 | + UFS_PWR_ERROR_CAP = 0x04, | ||
602 | + UFS_PWR_FATAL_ERROR = 0x05, | ||
603 | +}; | ||
604 | + | ||
605 | +/* UIC Commands */ | ||
606 | +enum uic_cmd_dme { | ||
607 | + UFS_UIC_CMD_DME_GET = 0x01, | ||
608 | + UFS_UIC_CMD_DME_SET = 0x02, | ||
609 | + UFS_UIC_CMD_DME_PEER_GET = 0x03, | ||
610 | + UFS_UIC_CMD_DME_PEER_SET = 0x04, | ||
611 | + UFS_UIC_CMD_DME_POWERON = 0x10, | ||
612 | + UFS_UIC_CMD_DME_POWEROFF = 0x11, | ||
613 | + UFS_UIC_CMD_DME_ENABLE = 0x12, | ||
614 | + UFS_UIC_CMD_DME_RESET = 0x14, | ||
615 | + UFS_UIC_CMD_DME_END_PT_RST = 0x15, | ||
616 | + UFS_UIC_CMD_DME_LINK_STARTUP = 0x16, | ||
617 | + UFS_UIC_CMD_DME_HIBER_ENTER = 0x17, | ||
618 | + UFS_UIC_CMD_DME_HIBER_EXIT = 0x18, | ||
619 | + UFS_UIC_CMD_DME_TEST_MODE = 0x1A, | ||
620 | +}; | ||
621 | + | ||
622 | +/* UIC Config result code / Generic error code */ | ||
623 | +enum { | ||
624 | + UFS_UIC_CMD_RESULT_SUCCESS = 0x00, | ||
625 | + UFS_UIC_CMD_RESULT_INVALID_ATTR = 0x01, | ||
626 | + UFS_UIC_CMD_RESULT_FAILURE = 0x01, | ||
627 | + UFS_UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02, | ||
628 | + UFS_UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03, | ||
629 | + UFS_UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04, | ||
630 | + UFS_UIC_CMD_RESULT_BAD_INDEX = 0x05, | ||
631 | + UFS_UIC_CMD_RESULT_LOCKED_ATTR = 0x06, | ||
632 | + UFS_UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07, | ||
633 | + UFS_UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08, | ||
634 | + UFS_UIC_CMD_RESULT_BUSY = 0x09, | ||
635 | + UFS_UIC_CMD_RESULT_DME_FAILURE = 0x0A, | ||
636 | +}; | ||
637 | + | ||
638 | +#define UFS_MASK_UIC_COMMAND_RESULT 0xFF | ||
639 | + | ||
640 | +/* | ||
641 | + * Request Descriptor Definitions | ||
642 | + */ | ||
643 | + | ||
644 | +/* Transfer request command type */ | ||
645 | +enum { | ||
646 | + UFS_UTP_CMD_TYPE_SCSI = 0x0, | ||
647 | + UFS_UTP_CMD_TYPE_UFS = 0x1, | ||
648 | + UFS_UTP_CMD_TYPE_DEV_MANAGE = 0x2, | ||
649 | +}; | ||
650 | + | ||
651 | +/* To accommodate the Command type required by UFS 2.0 */ | ||
652 | +enum { | ||
653 | + UFS_UTP_CMD_TYPE_UFS_STORAGE = 0x1, | ||
654 | +}; | ||
655 | + | ||
656 | +enum { | ||
657 | + UFS_UTP_SCSI_COMMAND = 0x00000000, | ||
658 | + UFS_UTP_NATIVE_UFS_COMMAND = 0x10000000, | ||
659 | + UFS_UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000, | ||
660 | + UFS_UTP_REQ_DESC_INT_CMD = 0x01000000, | ||
661 | + UFS_UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000, | ||
662 | +}; | ||
663 | + | ||
664 | +/* UTP Transfer Request Data Direction (DD) */ | ||
665 | +enum { | ||
666 | + UFS_UTP_NO_DATA_TRANSFER = 0x00000000, | ||
667 | + UFS_UTP_HOST_TO_DEVICE = 0x02000000, | ||
668 | + UFS_UTP_DEVICE_TO_HOST = 0x04000000, | ||
669 | +}; | ||
670 | + | ||
671 | +/* Overall command status values */ | ||
672 | +enum UtpOcsCodes { | ||
673 | + UFS_OCS_SUCCESS = 0x0, | ||
674 | + UFS_OCS_INVALID_CMD_TABLE_ATTR = 0x1, | ||
675 | + UFS_OCS_INVALID_PRDT_ATTR = 0x2, | ||
676 | + UFS_OCS_MISMATCH_DATA_BUF_SIZE = 0x3, | ||
677 | + UFS_OCS_MISMATCH_RESP_UPIU_SIZE = 0x4, | ||
678 | + UFS_OCS_PEER_COMM_FAILURE = 0x5, | ||
679 | + UFS_OCS_ABORTED = 0x6, | ||
680 | + UFS_OCS_FATAL_ERROR = 0x7, | ||
681 | + UFS_OCS_DEVICE_FATAL_ERROR = 0x8, | ||
682 | + UFS_OCS_INVALID_CRYPTO_CONFIG = 0x9, | ||
683 | + UFS_OCS_GENERAL_CRYPTO_ERROR = 0xa, | ||
684 | + UFS_OCS_INVALID_COMMAND_STATUS = 0xf, | ||
685 | +}; | ||
686 | + | ||
687 | +enum { | ||
688 | + UFS_MASK_OCS = 0x0F, | ||
689 | +}; | ||
690 | + | ||
691 | +/* | ||
692 | + * struct UfshcdSgEntry - UFSHCI PRD Entry | ||
693 | + * @addr: Physical address; DW-0 and DW-1. | ||
694 | + * @reserved: Reserved for future use DW-2 | ||
695 | + * @size: size of physical segment DW-3 | ||
696 | + */ | ||
697 | +typedef struct QEMU_PACKED UfshcdSgEntry { | ||
698 | + uint64_t addr; | ||
699 | + uint32_t reserved; | ||
700 | + uint32_t size; | ||
701 | + /* | ||
702 | + * followed by variant-specific fields if | ||
703 | + * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined. | ||
704 | + */ | ||
705 | +} UfshcdSgEntry; | ||
706 | + | ||
707 | +/* | ||
708 | + * struct RequestDescHeader - Descriptor Header common to both UTRD and UTMRD | ||
709 | + * @dword_0: Descriptor Header DW0 | ||
710 | + * @dword_1: Descriptor Header DW1 | ||
711 | + * @dword_2: Descriptor Header DW2 | ||
712 | + * @dword_3: Descriptor Header DW3 | ||
713 | + */ | ||
714 | +typedef struct QEMU_PACKED RequestDescHeader { | ||
715 | + uint32_t dword_0; | ||
716 | + uint32_t dword_1; | ||
717 | + uint32_t dword_2; | ||
718 | + uint32_t dword_3; | ||
719 | +} RequestDescHeader; | ||
720 | + | ||
721 | +/* | ||
722 | + * struct UtpTransferReqDesc - UTP Transfer Request Descriptor (UTRD) | ||
723 | + * @header: UTRD header DW-0 to DW-3 | ||
724 | + * @command_desc_base_addr_lo: UCD base address low DW-4 | ||
725 | + * @command_desc_base_addr_hi: UCD base address high DW-5 | ||
726 | + * @response_upiu_length: response UPIU length DW-6 | ||
727 | + * @response_upiu_offset: response UPIU offset DW-6 | ||
728 | + * @prd_table_length: Physical region descriptor length DW-7 | ||
729 | + * @prd_table_offset: Physical region descriptor offset DW-7 | ||
730 | + */ | ||
731 | +typedef struct QEMU_PACKED UtpTransferReqDesc { | ||
732 | + /* DW 0-3 */ | ||
733 | + RequestDescHeader header; | ||
734 | + | ||
735 | + /* DW 4-5 */ | ||
736 | + uint32_t command_desc_base_addr_lo; | ||
737 | + uint32_t command_desc_base_addr_hi; | ||
738 | + | ||
739 | + /* DW 6 */ | ||
740 | + uint16_t response_upiu_length; | ||
741 | + uint16_t response_upiu_offset; | ||
742 | + | ||
743 | + /* DW 7 */ | ||
744 | + uint16_t prd_table_length; | ||
745 | + uint16_t prd_table_offset; | ||
746 | +} UtpTransferReqDesc; | ||
747 | + | ||
748 | +/* | ||
749 | + * UTMRD structure. | ||
750 | + */ | ||
751 | +typedef struct QEMU_PACKED UtpTaskReqDesc { | ||
752 | + /* DW 0-3 */ | ||
753 | + RequestDescHeader header; | ||
754 | + | ||
755 | + /* DW 4-11 - Task request UPIU structure */ | ||
756 | + struct { | ||
757 | + UtpUpiuHeader req_header; | ||
758 | + uint32_t input_param1; | ||
759 | + uint32_t input_param2; | ||
760 | + uint32_t input_param3; | ||
761 | + uint32_t reserved1[2]; | ||
762 | + } upiu_req; | ||
763 | + | ||
764 | + /* DW 12-19 - Task Management Response UPIU structure */ | ||
765 | + struct { | ||
766 | + UtpUpiuHeader rsp_header; | ||
767 | + uint32_t output_param1; | ||
768 | + uint32_t output_param2; | ||
769 | + uint32_t reserved2[3]; | ||
770 | + } upiu_rsp; | ||
771 | +} UtpTaskReqDesc; | ||
772 | + | ||
773 | +/* | ||
774 | + * The code below is copied from the Linux kernel ("include/ufs/ufs.h") and | ||
775 | + * modified to fit the QEMU style. | ||
776 | + */ | ||
777 | + | ||
778 | +#define UFS_GENERAL_UPIU_REQUEST_SIZE (sizeof(UtpUpiuReq)) | ||
779 | +#define UFS_QUERY_DESC_MAX_SIZE 255 | ||
780 | +#define UFS_QUERY_DESC_MIN_SIZE 2 | ||
781 | +#define UFS_QUERY_DESC_HDR_SIZE 2 | ||
782 | +#define UFS_QUERY_OSF_SIZE (UFS_GENERAL_UPIU_REQUEST_SIZE - (sizeof(UtpUpiuHeader))) | ||
783 | +#define UFS_SENSE_SIZE 18 | ||
784 | + | ||
785 | +/* | ||
786 | + * A UFS device may have standard LUs, whose LUN ids range from 0x00 to | ||
787 | + * 0x7F. Standard LUs use the "Peripheral Device Addressing Format". | ||
788 | + * A UFS device may also have Well Known LUs (also referred to as W-LUs), | ||
789 | + * which again range from 0x00 to 0x7F. For W-LUs, the device only uses | ||
790 | + * the "Extended Addressing Format", which means the W-LUNs start | ||
791 | + * from 0xc100 (SCSI_W_LUN_BASE) onwards. | ||
792 | + * This means the maximum LUN number reported by a UFS device is 0xC17F. | ||
793 | + */ | ||
794 | +#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F | ||
795 | +#define UFS_UPIU_WLUN_ID (1 << 7) | ||
796 | + | ||
797 | +/* The WriteBooster buffer is available only for logical units 0 to 7 */ | ||
798 | +#define UFS_UPIU_MAX_WB_LUN_ID 8 | ||
799 | + | ||
800 | +/* | ||
801 | + * The WriteBooster buffer lifetime has a limit set by the vendor. | ||
802 | + * If it exceeds the limit, the WriteBooster feature will be disabled. | ||
803 | + */ | ||
804 | +#define UFS_WB_EXCEED_LIFETIME 0x0B | ||
805 | + | ||
806 | +/* | ||
807 | + * In the UFS spec, the Extra Header Segment (EHS) starts at byte 32 of a | ||
808 | + * UPIU request/response packet. | ||
809 | + */ | ||
810 | +#define UFS_EHS_OFFSET_IN_RESPONSE 32 | ||
811 | + | ||
812 | +/* Well known logical unit id in LUN field of UPIU */ | ||
813 | +enum { | ||
814 | + UFS_UPIU_REPORT_LUNS_WLUN = 0x81, | ||
815 | + UFS_UPIU_UFS_DEVICE_WLUN = 0xD0, | ||
816 | + UFS_UPIU_BOOT_WLUN = 0xB0, | ||
817 | + UFS_UPIU_RPMB_WLUN = 0xC4, | ||
818 | +}; | ||
819 | + | ||
820 | +/* | ||
821 | + * UFS Protocol Information Unit related definitions | ||
822 | + */ | ||
823 | + | ||
824 | +/* Task management functions */ | ||
825 | +enum { | ||
826 | + UFS_ABORT_TASK = 0x01, | ||
827 | + UFS_ABORT_TASK_SET = 0x02, | ||
828 | + UFS_CLEAR_TASK_SET = 0x04, | ||
829 | + UFS_LOGICAL_RESET = 0x08, | ||
830 | + UFS_QUERY_TASK = 0x80, | ||
831 | + UFS_QUERY_TASK_SET = 0x81, | ||
832 | +}; | ||
833 | + | ||
834 | +/* UTP UPIU Transaction Codes Initiator to Target */ | ||
835 | +enum { | ||
836 | + UFS_UPIU_TRANSACTION_NOP_OUT = 0x00, | ||
837 | + UFS_UPIU_TRANSACTION_COMMAND = 0x01, | ||
838 | + UFS_UPIU_TRANSACTION_DATA_OUT = 0x02, | ||
839 | + UFS_UPIU_TRANSACTION_TASK_REQ = 0x04, | ||
840 | + UFS_UPIU_TRANSACTION_QUERY_REQ = 0x16, | ||
841 | +}; | ||
842 | + | ||
843 | +/* UTP UPIU Transaction Codes Target to Initiator */ | ||
844 | +enum { | ||
845 | + UFS_UPIU_TRANSACTION_NOP_IN = 0x20, | ||
846 | + UFS_UPIU_TRANSACTION_RESPONSE = 0x21, | ||
847 | + UFS_UPIU_TRANSACTION_DATA_IN = 0x22, | ||
848 | + UFS_UPIU_TRANSACTION_TASK_RSP = 0x24, | ||
849 | + UFS_UPIU_TRANSACTION_READY_XFER = 0x31, | ||
850 | + UFS_UPIU_TRANSACTION_QUERY_RSP = 0x36, | ||
851 | + UFS_UPIU_TRANSACTION_REJECT_UPIU = 0x3F, | ||
852 | +}; | ||
853 | + | ||
854 | +/* UPIU Read/Write flags */ | ||
855 | +enum { | ||
856 | + UFS_UPIU_CMD_FLAGS_NONE = 0x00, | ||
857 | + UFS_UPIU_CMD_FLAGS_WRITE = 0x20, | ||
858 | + UFS_UPIU_CMD_FLAGS_READ = 0x40, | ||
859 | +}; | ||
860 | + | ||
861 | +/* UPIU Task Attributes */ | ||
862 | +enum { | ||
863 | + UFS_UPIU_TASK_ATTR_SIMPLE = 0x00, | ||
864 | + UFS_UPIU_TASK_ATTR_ORDERED = 0x01, | ||
865 | + UFS_UPIU_TASK_ATTR_HEADQ = 0x02, | ||
866 | + UFS_UPIU_TASK_ATTR_ACA = 0x03, | ||
867 | +}; | ||
868 | + | ||
869 | +/* UPIU Query request function */ | ||
870 | +enum { | ||
871 | + UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, | ||
872 | + UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, | ||
873 | +}; | ||
874 | + | ||
875 | +/* Flag idn for Query Requests */ | ||
876 | +enum flag_idn { | ||
877 | + UFS_QUERY_FLAG_IDN_FDEVICEINIT = 0x01, | ||
878 | + UFS_QUERY_FLAG_IDN_PERMANENT_WPE = 0x02, | ||
879 | + UFS_QUERY_FLAG_IDN_PWR_ON_WPE = 0x03, | ||
880 | + UFS_QUERY_FLAG_IDN_BKOPS_EN = 0x04, | ||
881 | + UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05, | ||
882 | + UFS_QUERY_FLAG_IDN_PURGE_ENABLE = 0x06, | ||
883 | + UFS_QUERY_FLAG_IDN_REFRESH_ENABLE = 0x07, | ||
884 | + UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08, | ||
885 | + UFS_QUERY_FLAG_IDN_BUSY_RTC = 0x09, | ||
886 | + UFS_QUERY_FLAG_IDN_RESERVED3 = 0x0A, | ||
887 | + UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B, | ||
888 | + UFS_QUERY_FLAG_IDN_WB_EN = 0x0E, | ||
889 | + UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F, | ||
890 | + UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10, | ||
891 | + UFS_QUERY_FLAG_IDN_HPB_RESET = 0x11, | ||
892 | + UFS_QUERY_FLAG_IDN_HPB_EN = 0x12, | ||
893 | + UFS_QUERY_FLAG_IDN_COUNT, | ||
894 | +}; | ||
895 | + | ||
896 | +/* Attribute idn for Query requests */ | ||
897 | +enum attr_idn { | ||
898 | + UFS_QUERY_ATTR_IDN_BOOT_LU_EN = 0x00, | ||
899 | + UFS_QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01, | ||
900 | + UFS_QUERY_ATTR_IDN_POWER_MODE = 0x02, | ||
901 | + UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, | ||
902 | + UFS_QUERY_ATTR_IDN_OOO_DATA_EN = 0x04, | ||
903 | + UFS_QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, | ||
904 | + UFS_QUERY_ATTR_IDN_PURGE_STATUS = 0x06, | ||
905 | + UFS_QUERY_ATTR_IDN_MAX_DATA_IN = 0x07, | ||
906 | + UFS_QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08, | ||
907 | + UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09, | ||
908 | + UFS_QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A, | ||
909 | + UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B, | ||
910 | + UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C, | ||
911 | + UFS_QUERY_ATTR_IDN_EE_CONTROL = 0x0D, | ||
912 | + UFS_QUERY_ATTR_IDN_EE_STATUS = 0x0E, | ||
913 | + UFS_QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F, | ||
914 | + UFS_QUERY_ATTR_IDN_CNTX_CONF = 0x10, | ||
915 | + UFS_QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11, | ||
916 | + UFS_QUERY_ATTR_IDN_RESERVED2 = 0x12, | ||
917 | + UFS_QUERY_ATTR_IDN_RESERVED3 = 0x13, | ||
918 | + UFS_QUERY_ATTR_IDN_FFU_STATUS = 0x14, | ||
919 | + UFS_QUERY_ATTR_IDN_PSA_STATE = 0x15, | ||
920 | + UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16, | ||
921 | + UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17, | ||
922 | + UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18, | ||
923 | + UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19, | ||
924 | + UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A, | ||
925 | + UFS_QUERY_ATTR_IDN_THROTTLING_STATUS = 0x1B, | ||
926 | + UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C, | ||
927 | + UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D, | ||
928 | + UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E, | ||
929 | + UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F, | ||
930 | + UFS_QUERY_ATTR_IDN_REFRESH_STATUS = 0x2C, | ||
931 | + UFS_QUERY_ATTR_IDN_REFRESH_FREQ = 0x2D, | ||
932 | + UFS_QUERY_ATTR_IDN_REFRESH_UNIT = 0x2E, | ||
933 | + UFS_QUERY_ATTR_IDN_COUNT, | ||
934 | +}; | ||
935 | + | ||
936 | +/* Descriptor idn for Query requests */ | ||
937 | +enum desc_idn { | ||
938 | + UFS_QUERY_DESC_IDN_DEVICE = 0x0, | ||
939 | + UFS_QUERY_DESC_IDN_CONFIGURATION = 0x1, | ||
940 | + UFS_QUERY_DESC_IDN_UNIT = 0x2, | ||
941 | + UFS_QUERY_DESC_IDN_RFU_0 = 0x3, | ||
942 | + UFS_QUERY_DESC_IDN_INTERCONNECT = 0x4, | ||
943 | + UFS_QUERY_DESC_IDN_STRING = 0x5, | ||
944 | + UFS_QUERY_DESC_IDN_RFU_1 = 0x6, | ||
945 | + UFS_QUERY_DESC_IDN_GEOMETRY = 0x7, | ||
946 | + UFS_QUERY_DESC_IDN_POWER = 0x8, | ||
947 | + UFS_QUERY_DESC_IDN_HEALTH = 0x9, | ||
948 | + UFS_QUERY_DESC_IDN_MAX, | ||
949 | +}; | ||
950 | + | ||
951 | +enum desc_header_offset { | ||
952 | + UFS_QUERY_DESC_LENGTH_OFFSET = 0x00, | ||
953 | + UFS_QUERY_DESC_DESC_TYPE_OFFSET = 0x01, | ||
954 | +}; | ||
955 | + | ||
956 | +/* Unit descriptor parameters offsets in bytes */ | ||
957 | +enum unit_desc_param { | ||
958 | + UFS_UNIT_DESC_PARAM_LEN = 0x0, | ||
959 | + UFS_UNIT_DESC_PARAM_TYPE = 0x1, | ||
960 | + UFS_UNIT_DESC_PARAM_UNIT_INDEX = 0x2, | ||
961 | + UFS_UNIT_DESC_PARAM_LU_ENABLE = 0x3, | ||
962 | + UFS_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4, | ||
963 | + UFS_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5, | ||
964 | + UFS_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6, | ||
965 | + UFS_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7, | ||
966 | + UFS_UNIT_DESC_PARAM_MEM_TYPE = 0x8, | ||
967 | + UFS_UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9, | ||
968 | + UFS_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA, | ||
969 | + UFS_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB, | ||
970 | + UFS_UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13, | ||
971 | + UFS_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17, | ||
972 | + UFS_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, | ||
973 | + UFS_UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, | ||
974 | + UFS_UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, | ||
975 | + UFS_UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23, | ||
976 | + UFS_UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25, | ||
977 | + UFS_UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27, | ||
978 | + UFS_UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29, | ||
979 | +}; | ||
980 | + | ||
981 | +/* RPMB Unit descriptor parameters offsets in bytes */ | ||
982 | +enum rpmb_unit_desc_param { | ||
983 | + UFS_RPMB_UNIT_DESC_PARAM_LEN = 0x0, | ||
984 | + UFS_RPMB_UNIT_DESC_PARAM_TYPE = 0x1, | ||
985 | + UFS_RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2, | ||
986 | + UFS_RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3, | ||
987 | + UFS_RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4, | ||
988 | + UFS_RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5, | ||
989 | + UFS_RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6, | ||
990 | + UFS_RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7, | ||
991 | + UFS_RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8, | ||
992 | + UFS_RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9, | ||
993 | + UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA, | ||
994 | + UFS_RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB, | ||
995 | + UFS_RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13, | ||
996 | + UFS_RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14, | ||
997 | + UFS_RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15, | ||
998 | + UFS_RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16, | ||
999 | + UFS_RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17, | ||
1000 | + UFS_RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, | ||
1001 | +}; | ||
1002 | + | ||
1003 | +/* Device descriptor parameters offsets in bytes */ | ||
1004 | +enum device_desc_param { | ||
1005 | + UFS_DEVICE_DESC_PARAM_LEN = 0x0, | ||
1006 | + UFS_DEVICE_DESC_PARAM_TYPE = 0x1, | ||
1007 | + UFS_DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2, | ||
1008 | + UFS_DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3, | ||
1009 | + UFS_DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4, | ||
1010 | + UFS_DEVICE_DESC_PARAM_PRTCL = 0x5, | ||
1011 | + UFS_DEVICE_DESC_PARAM_NUM_LU = 0x6, | ||
1012 | + UFS_DEVICE_DESC_PARAM_NUM_WLU = 0x7, | ||
1013 | + UFS_DEVICE_DESC_PARAM_BOOT_ENBL = 0x8, | ||
1014 | + UFS_DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9, | ||
1015 | + UFS_DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA, | ||
1016 | + UFS_DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB, | ||
1017 | + UFS_DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC, | ||
1018 | + UFS_DEVICE_DESC_PARAM_SEC_LU = 0xD, | ||
1019 | + UFS_DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE, | ||
1020 | + UFS_DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF, | ||
1021 | + UFS_DEVICE_DESC_PARAM_SPEC_VER = 0x10, | ||
1022 | + UFS_DEVICE_DESC_PARAM_MANF_DATE = 0x12, | ||
1023 | + UFS_DEVICE_DESC_PARAM_MANF_NAME = 0x14, | ||
1024 | + UFS_DEVICE_DESC_PARAM_PRDCT_NAME = 0x15, | ||
1025 | + UFS_DEVICE_DESC_PARAM_SN = 0x16, | ||
1026 | + UFS_DEVICE_DESC_PARAM_OEM_ID = 0x17, | ||
1027 | + UFS_DEVICE_DESC_PARAM_MANF_ID = 0x18, | ||
1028 | + UFS_DEVICE_DESC_PARAM_UD_OFFSET = 0x1A, | ||
1029 | + UFS_DEVICE_DESC_PARAM_UD_LEN = 0x1B, | ||
1030 | + UFS_DEVICE_DESC_PARAM_RTT_CAP = 0x1C, | ||
1031 | + UFS_DEVICE_DESC_PARAM_FRQ_RTC = 0x1D, | ||
1032 | + UFS_DEVICE_DESC_PARAM_UFS_FEAT = 0x1F, | ||
1033 | + UFS_DEVICE_DESC_PARAM_FFU_TMT = 0x20, | ||
1034 | + UFS_DEVICE_DESC_PARAM_Q_DPTH = 0x21, | ||
1035 | + UFS_DEVICE_DESC_PARAM_DEV_VER = 0x22, | ||
1036 | + UFS_DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24, | ||
1037 | + UFS_DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, | ||
1038 | + UFS_DEVICE_DESC_PARAM_PSA_TMT = 0x29, | ||
1039 | + UFS_DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, | ||
1040 | + UFS_DEVICE_DESC_PARAM_HPB_VER = 0x40, | ||
1041 | + UFS_DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, | ||
1042 | + UFS_DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F, | ||
1043 | + UFS_DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53, | ||
1044 | + UFS_DEVICE_DESC_PARAM_WB_TYPE = 0x54, | ||
1045 | + UFS_DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55, | ||
1046 | +}; | ||
1047 | + | ||
1048 | +/* Interconnect descriptor parameters offsets in bytes */ | ||
1049 | +enum interconnect_desc_param { | ||
1050 | + UFS_INTERCONNECT_DESC_PARAM_LEN = 0x0, | ||
1051 | + UFS_INTERCONNECT_DESC_PARAM_TYPE = 0x1, | ||
1052 | + UFS_INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2, | ||
1053 | + UFS_INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4, | ||
1054 | +}; | ||
1055 | + | ||
1056 | +/* Geometry descriptor parameters offsets in bytes */ | ||
1057 | +enum geometry_desc_param { | ||
1058 | + UFS_GEOMETRY_DESC_PARAM_LEN = 0x0, | ||
1059 | + UFS_GEOMETRY_DESC_PARAM_TYPE = 0x1, | ||
1060 | + UFS_GEOMETRY_DESC_PARAM_DEV_CAP = 0x4, | ||
1061 | + UFS_GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC, | ||
1062 | + UFS_GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD, | ||
1063 | + UFS_GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11, | ||
1064 | + UFS_GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12, | ||
1065 | + UFS_GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13, | ||
1066 | + UFS_GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14, | ||
1067 | + UFS_GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15, | ||
1068 | + UFS_GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16, | ||
1069 | + UFS_GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17, | ||
1070 | + UFS_GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18, | ||
1071 | + UFS_GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19, | ||
1072 | + UFS_GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A, | ||
1073 | + UFS_GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B, | ||
1074 | + UFS_GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C, | ||
1075 | + UFS_GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D, | ||
1076 | + UFS_GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E, | ||
1077 | + UFS_GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20, | ||
1078 | + UFS_GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24, | ||
1079 | + UFS_GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26, | ||
1080 | + UFS_GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A, | ||
1081 | + UFS_GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C, | ||
1082 | + UFS_GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30, | ||
1083 | + UFS_GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32, | ||
1084 | + UFS_GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36, | ||
1085 | + UFS_GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38, | ||
1086 | + UFS_GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C, | ||
1087 | + UFS_GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E, | ||
1088 | + UFS_GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42, | ||
1089 | + UFS_GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44, | ||
1090 | + UFS_GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48, | ||
1091 | + UFS_GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49, | ||
1092 | + UFS_GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A, | ||
1093 | + UFS_GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B, | ||
1094 | + UFS_GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F, | ||
1095 | + UFS_GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53, | ||
1096 | + UFS_GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54, | ||
1097 | + UFS_GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55, | ||
1098 | + UFS_GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56, | ||
1099 | +}; | ||
1100 | + | ||
1101 | +/* Health descriptor parameters offsets in bytes */ | ||
1102 | +enum health_desc_param { | ||
1103 | + UFS_HEALTH_DESC_PARAM_LEN = 0x0, | ||
1104 | + UFS_HEALTH_DESC_PARAM_TYPE = 0x1, | ||
1105 | + UFS_HEALTH_DESC_PARAM_EOL_INFO = 0x2, | ||
1106 | + UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3, | ||
1107 | + UFS_HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4, | ||
1108 | +}; | ||
1109 | + | ||
1110 | +/* WriteBooster buffer mode */ | ||
1111 | +enum { | ||
1112 | + UFS_WB_BUF_MODE_LU_DEDICATED = 0x0, | ||
1113 | + UFS_WB_BUF_MODE_SHARED = 0x1, | ||
1114 | +}; | ||
1115 | + | ||
1116 | +/* | ||
1117 | + * Logical Unit Write Protect | ||
1118 | + * 00h: LU not write protected | ||
1119 | + * 01h: LU write protected when fPowerOnWPEn = 1 | ||
1120 | + * 02h: LU permanently write protected when fPermanentWPEn = 1 | ||
1121 | + */ | ||
1122 | +enum ufs_lu_wp_type { | ||
1123 | + UFS_LU_NO_WP = 0x00, | ||
1124 | + UFS_LU_POWER_ON_WP = 0x01, | ||
1125 | + UFS_LU_PERM_WP = 0x02, | ||
1126 | +}; | ||
1127 | + | ||
1128 | +/* UTP QUERY Transaction Specific Fields OpCode */ | ||
1129 | +enum query_opcode { | ||
1130 | + UFS_UPIU_QUERY_OPCODE_NOP = 0x0, | ||
1131 | + UFS_UPIU_QUERY_OPCODE_READ_DESC = 0x1, | ||
1132 | + UFS_UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, | ||
1133 | + UFS_UPIU_QUERY_OPCODE_READ_ATTR = 0x3, | ||
1134 | + UFS_UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4, | ||
1135 | + UFS_UPIU_QUERY_OPCODE_READ_FLAG = 0x5, | ||
1136 | + UFS_UPIU_QUERY_OPCODE_SET_FLAG = 0x6, | ||
1137 | + UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7, | ||
1138 | + UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, | ||
1139 | +}; | ||
1140 | + | ||
1141 | +/* Query response result code */ | ||
1142 | +typedef enum QueryRespCode { | ||
1143 | + UFS_QUERY_RESULT_SUCCESS = 0x00, | ||
1144 | + UFS_QUERY_RESULT_NOT_READABLE = 0xF6, | ||
1145 | + UFS_QUERY_RESULT_NOT_WRITEABLE = 0xF7, | ||
1146 | + UFS_QUERY_RESULT_ALREADY_WRITTEN = 0xF8, | ||
1147 | + UFS_QUERY_RESULT_INVALID_LENGTH = 0xF9, | ||
1148 | + UFS_QUERY_RESULT_INVALID_VALUE = 0xFA, | ||
1149 | + UFS_QUERY_RESULT_INVALID_SELECTOR = 0xFB, | ||
1150 | + UFS_QUERY_RESULT_INVALID_INDEX = 0xFC, | ||
1151 | + UFS_QUERY_RESULT_INVALID_IDN = 0xFD, | ||
1152 | + UFS_QUERY_RESULT_INVALID_OPCODE = 0xFE, | ||
1153 | + UFS_QUERY_RESULT_GENERAL_FAILURE = 0xFF, | ||
1154 | +} QueryRespCode; | ||
1155 | + | ||
1156 | +/* UTP Transfer Request Command Type (CT) */ | ||
1157 | +enum { | ||
1158 | + UFS_UPIU_COMMAND_SET_TYPE_SCSI = 0x0, | ||
1159 | + UFS_UPIU_COMMAND_SET_TYPE_UFS = 0x1, | ||
1160 | + UFS_UPIU_COMMAND_SET_TYPE_QUERY = 0x2, | ||
1161 | +}; | ||
1162 | + | ||
1163 | +/* Task management service response */ | ||
1164 | +enum { | ||
1165 | + UFS_UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00, | ||
1166 | + UFS_UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04, | ||
1167 | + UFS_UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08, | ||
1168 | + UFS_UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, | ||
1169 | + UFS_UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, | ||
1170 | +}; | ||
1171 | + | ||
1172 | +/* UFS device power modes */ | ||
1173 | +enum ufs_dev_pwr_mode { | ||
1174 | + UFS_ACTIVE_PWR_MODE = 1, | ||
1175 | + UFS_SLEEP_PWR_MODE = 2, | ||
1176 | + UFS_POWERDOWN_PWR_MODE = 3, | ||
1177 | + UFS_DEEPSLEEP_PWR_MODE = 4, | ||
1178 | +}; | ||
1179 | + | ||
1180 | +/* | ||
1181 | + * struct UtpCmdRsp - Response UPIU structure | ||
1182 | + * @residual_transfer_count: Residual transfer count DW-3 | ||
1183 | + * @reserved: Reserved double words DW-4 to DW-7 | ||
1184 | + * @sense_data_len: Sense data length DW-8 U16 | ||
1185 | + * @sense_data: Sense data field DW-8 to DW-12 | ||
1186 | + */ | ||
1187 | +typedef struct QEMU_PACKED UtpCmdRsp { | ||
1188 | + uint32_t residual_transfer_count; | ||
1189 | + uint32_t reserved[4]; | ||
1190 | + uint16_t sense_data_len; | ||
1191 | + uint8_t sense_data[UFS_SENSE_SIZE]; | ||
1192 | +} UtpCmdRsp; | ||
1193 | + | ||
1194 | +/* | ||
1195 | + * struct UtpUpiuRsp - general upiu response structure | ||
1196 | + * @header: UPIU header structure DW-0 to DW-2 | ||
1197 | + * @sr: fields structure for scsi command DW-3 to DW-12 | ||
1198 | + * @qr: fields structure for query request DW-3 to DW-7 | ||
1199 | + */ | ||
1200 | +typedef struct QEMU_PACKED UtpUpiuRsp { | ||
1201 | + UtpUpiuHeader header; | ||
1202 | + union { | ||
1203 | + UtpCmdRsp sr; | ||
1204 | + UtpUpiuQuery qr; | ||
1205 | + }; | ||
1206 | +} UtpUpiuRsp; | ||
1207 | + | ||
1208 | +static inline void _ufs_check_size(void) | ||
1209 | +{ | ||
1210 | + QEMU_BUILD_BUG_ON(sizeof(UfsReg) != 0x104); | ||
1211 | + QEMU_BUILD_BUG_ON(sizeof(DeviceDescriptor) != 89); | ||
1212 | + QEMU_BUILD_BUG_ON(sizeof(GeometryDescriptor) != 87); | ||
1213 | + QEMU_BUILD_BUG_ON(sizeof(UnitDescriptor) != 45); | ||
1214 | + QEMU_BUILD_BUG_ON(sizeof(RpmbUnitDescriptor) != 35); | ||
1215 | + QEMU_BUILD_BUG_ON(sizeof(PowerParametersDescriptor) != 98); | ||
1216 | + QEMU_BUILD_BUG_ON(sizeof(InterconnectDescriptor) != 6); | ||
1217 | + QEMU_BUILD_BUG_ON(sizeof(StringDescriptor) != 254); | ||
1218 | + QEMU_BUILD_BUG_ON(sizeof(DeviceHealthDescriptor) != 45); | ||
1219 | + QEMU_BUILD_BUG_ON(sizeof(Flags) != 0x13); | ||
1220 | + QEMU_BUILD_BUG_ON(sizeof(UtpUpiuHeader) != 12); | ||
1221 | + QEMU_BUILD_BUG_ON(sizeof(UtpUpiuQuery) != 276); | ||
1222 | + QEMU_BUILD_BUG_ON(sizeof(UtpUpiuCmd) != 20); | ||
1223 | + QEMU_BUILD_BUG_ON(sizeof(UtpUpiuReq) != 288); | ||
1224 | + QEMU_BUILD_BUG_ON(sizeof(UfshcdSgEntry) != 16); | ||
1225 | + QEMU_BUILD_BUG_ON(sizeof(RequestDescHeader) != 16); | ||
1226 | + QEMU_BUILD_BUG_ON(sizeof(UtpTransferReqDesc) != 32); | ||
1227 | + QEMU_BUILD_BUG_ON(sizeof(UtpTaskReqDesc) != 80); | ||
1228 | + QEMU_BUILD_BUG_ON(sizeof(UtpCmdRsp) != 40); | ||
1229 | + QEMU_BUILD_BUG_ON(sizeof(UtpUpiuRsp) != 288); | ||
1230 | +} | ||
1231 | +#endif | ||
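
The _ufs_check_size() block above pins every wire-format structure to its expected size at compile time: QEMU_BUILD_BUG_ON() turns a size mismatch (for example, padding introduced by a missed QEMU_PACKED) into a build failure rather than a silent on-the-wire corruption. A minimal illustration of the pattern, using a made-up struct that is not part of the patch:

/* Illustrative only -- Example is a placeholder, not patch code. */
typedef struct QEMU_PACKED Example {
    uint32_t dword;
    uint16_t word;
} Example;

/* Fails to compile if the struct is not exactly 6 bytes. */
QEMU_BUILD_BUG_ON(sizeof(Example) != 6);
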
1232 | diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h | ||
1233 | index XXXXXXX..XXXXXXX 100644 | ||
1234 | --- a/include/hw/pci/pci.h | ||
1235 | +++ b/include/hw/pci/pci.h | ||
1236 | @@ -XXX,XX +XXX,XX @@ extern bool pci_available; | ||
1237 | #define PCI_DEVICE_ID_REDHAT_NVME 0x0010 | ||
1238 | #define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011 | ||
1239 | #define PCI_DEVICE_ID_REDHAT_ACPI_ERST 0x0012 | ||
1240 | +#define PCI_DEVICE_ID_REDHAT_UFS 0x0013 | ||
1241 | #define PCI_DEVICE_ID_REDHAT_QXL 0x0100 | ||
1242 | |||
1243 | #define FMT_PCIBUS PRIx64 | ||
1244 | diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h | ||
1245 | index XXXXXXX..XXXXXXX 100644 | ||
1246 | --- a/include/hw/pci/pci_ids.h | ||
1247 | +++ b/include/hw/pci/pci_ids.h | ||
1248 | @@ -XXX,XX +XXX,XX @@ | ||
1249 | #define PCI_CLASS_STORAGE_SATA 0x0106 | ||
1250 | #define PCI_CLASS_STORAGE_SAS 0x0107 | ||
1251 | #define PCI_CLASS_STORAGE_EXPRESS 0x0108 | ||
1252 | +#define PCI_CLASS_STORAGE_UFS 0x0109 | ||
1253 | #define PCI_CLASS_STORAGE_OTHER 0x0180 | ||
1254 | |||
1255 | #define PCI_BASE_CLASS_NETWORK 0x02 | ||
1256 | diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c | ||
1257 | new file mode 100644 | ||
1258 | index XXXXXXX..XXXXXXX | ||
1259 | --- /dev/null | ||
1260 | +++ b/hw/ufs/ufs.c | ||
1261 | @@ -XXX,XX +XXX,XX @@ | 294 | @@ -XXX,XX +XXX,XX @@ |
1262 | +/* | 295 | +/* |
1263 | + * QEMU Universal Flash Storage (UFS) Controller | 296 | + * AioContext multithreading tests |
1264 | + * | 297 | + * |
1265 | + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved. | 298 | + * Copyright Red Hat, Inc. 2016 |
1266 | + * | 299 | + * |
1267 | + * Written by Jeuk Kim <jeuk20.kim@samsung.com> | 300 | + * Authors: |
1268 | + * | 301 | + * Paolo Bonzini <pbonzini@redhat.com> |
1269 | + * SPDX-License-Identifier: GPL-2.0-or-later | 302 | + * |
303 | + * This work is licensed under the terms of the GNU LGPL, version 2 or later. | ||
304 | + * See the COPYING.LIB file in the top-level directory. | ||
1270 | + */ | 305 | + */ |
1271 | + | 306 | + |
1272 | +#include "qemu/osdep.h" | 307 | +#include "qemu/osdep.h" |
308 | +#include <glib.h> | ||
309 | +#include "block/aio.h" | ||
1273 | +#include "qapi/error.h" | 310 | +#include "qapi/error.h" |
1274 | +#include "migration/vmstate.h" | 311 | +#include "qemu/coroutine.h" |
312 | +#include "qemu/thread.h" | ||
313 | +#include "qemu/error-report.h" | ||
314 | +#include "iothread.h" | ||
315 | + | ||
316 | +/* AioContext management */ | ||
317 | + | ||
318 | +#define NUM_CONTEXTS 5 | ||
319 | + | ||
320 | +static IOThread *threads[NUM_CONTEXTS]; | ||
321 | +static AioContext *ctx[NUM_CONTEXTS]; | ||
322 | +static __thread int id = -1; | ||
323 | + | ||
324 | +static QemuEvent done_event; | ||
325 | + | ||
326 | +/* Run a function synchronously on a remote iothread. */ | ||
327 | + | ||
328 | +typedef struct CtxRunData { | ||
329 | + QEMUBHFunc *cb; | ||
330 | + void *arg; | ||
331 | +} CtxRunData; | ||
332 | + | ||
333 | +static void ctx_run_bh_cb(void *opaque) | ||
334 | +{ | ||
335 | + CtxRunData *data = opaque; | ||
336 | + | ||
337 | + data->cb(data->arg); | ||
338 | + qemu_event_set(&done_event); | ||
339 | +} | ||
340 | + | ||
341 | +static void ctx_run(int i, QEMUBHFunc *cb, void *opaque) | ||
342 | +{ | ||
343 | + CtxRunData data = { | ||
344 | + .cb = cb, | ||
345 | + .arg = opaque | ||
346 | + }; | ||
347 | + | ||
348 | + qemu_event_reset(&done_event); | ||
349 | + aio_bh_schedule_oneshot(ctx[i], ctx_run_bh_cb, &data); | ||
350 | + qemu_event_wait(&done_event); | ||
351 | +} | ||
352 | + | ||
353 | +/* Starting the iothreads. */ | ||
354 | + | ||
355 | +static void set_id_cb(void *opaque) | ||
356 | +{ | ||
357 | + int *i = opaque; | ||
358 | + | ||
359 | + id = *i; | ||
360 | +} | ||
361 | + | ||
362 | +static void create_aio_contexts(void) | ||
363 | +{ | ||
364 | + int i; | ||
365 | + | ||
366 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
367 | + threads[i] = iothread_new(); | ||
368 | + ctx[i] = iothread_get_aio_context(threads[i]); | ||
369 | + } | ||
370 | + | ||
371 | + qemu_event_init(&done_event, false); | ||
372 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
373 | + ctx_run(i, set_id_cb, &i); | ||
374 | + } | ||
375 | +} | ||
376 | + | ||
377 | +/* Stopping the iothreads. */ | ||
378 | + | ||
379 | +static void join_aio_contexts(void) | ||
380 | +{ | ||
381 | + int i; | ||
382 | + | ||
383 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
384 | + aio_context_ref(ctx[i]); | ||
385 | + } | ||
386 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
387 | + iothread_join(threads[i]); | ||
388 | + } | ||
389 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
390 | + aio_context_unref(ctx[i]); | ||
391 | + } | ||
392 | + qemu_event_destroy(&done_event); | ||
393 | +} | ||
394 | + | ||
395 | +/* Basic test for the stuff above. */ | ||
396 | + | ||
397 | +static void test_lifecycle(void) | ||
398 | +{ | ||
399 | + create_aio_contexts(); | ||
400 | + join_aio_contexts(); | ||
401 | +} | ||
402 | + | ||
403 | +/* aio_co_schedule test. */ | ||
404 | + | ||
405 | +static Coroutine *to_schedule[NUM_CONTEXTS]; | ||
406 | + | ||
407 | +static bool now_stopping; | ||
408 | + | ||
409 | +static int count_retry; | ||
410 | +static int count_here; | ||
411 | +static int count_other; | ||
412 | + | ||
413 | +static bool schedule_next(int n) | ||
414 | +{ | ||
415 | + Coroutine *co; | ||
416 | + | ||
417 | + co = atomic_xchg(&to_schedule[n], NULL); | ||
418 | + if (!co) { | ||
419 | + atomic_inc(&count_retry); | ||
420 | + return false; | ||
421 | + } | ||
422 | + | ||
423 | + if (n == id) { | ||
424 | + atomic_inc(&count_here); | ||
425 | + } else { | ||
426 | + atomic_inc(&count_other); | ||
427 | + } | ||
428 | + | ||
429 | + aio_co_schedule(ctx[n], co); | ||
430 | + return true; | ||
431 | +} | ||
432 | + | ||
433 | +static void finish_cb(void *opaque) | ||
434 | +{ | ||
435 | + schedule_next(id); | ||
436 | +} | ||
437 | + | ||
438 | +static coroutine_fn void test_multi_co_schedule_entry(void *opaque) | ||
439 | +{ | ||
440 | + g_assert(to_schedule[id] == NULL); | ||
441 | + atomic_mb_set(&to_schedule[id], qemu_coroutine_self()); | ||
442 | + | ||
443 | + while (!atomic_mb_read(&now_stopping)) { | ||
444 | + int n; | ||
445 | + | ||
446 | + n = g_test_rand_int_range(0, NUM_CONTEXTS); | ||
447 | + schedule_next(n); | ||
448 | + qemu_coroutine_yield(); | ||
449 | + | ||
450 | + g_assert(to_schedule[id] == NULL); | ||
451 | + atomic_mb_set(&to_schedule[id], qemu_coroutine_self()); | ||
452 | + } | ||
453 | +} | ||
454 | + | ||
455 | + | ||
456 | +static void test_multi_co_schedule(int seconds) | ||
457 | +{ | ||
458 | + int i; | ||
459 | + | ||
460 | + count_here = count_other = count_retry = 0; | ||
461 | + now_stopping = false; | ||
462 | + | ||
463 | + create_aio_contexts(); | ||
464 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
465 | + Coroutine *co1 = qemu_coroutine_create(test_multi_co_schedule_entry, NULL); | ||
466 | + aio_co_schedule(ctx[i], co1); | ||
467 | + } | ||
468 | + | ||
469 | + g_usleep(seconds * 1000000); | ||
470 | + | ||
471 | + atomic_mb_set(&now_stopping, true); | ||
472 | + for (i = 0; i < NUM_CONTEXTS; i++) { | ||
473 | + ctx_run(i, finish_cb, NULL); | ||
474 | + to_schedule[i] = NULL; | ||
475 | + } | ||
476 | + | ||
477 | + join_aio_contexts(); | ||
478 | + g_test_message("scheduled %d, queued %d, retry %d, total %d\n", | ||
479 | + count_other, count_here, count_retry, | ||
480 | + count_here + count_other + count_retry); | ||
481 | +} | ||
482 | + | ||
483 | +static void test_multi_co_schedule_1(void) | ||
484 | +{ | ||
485 | + test_multi_co_schedule(1); | ||
486 | +} | ||
487 | + | ||
488 | +static void test_multi_co_schedule_10(void) | ||
489 | +{ | ||
490 | + test_multi_co_schedule(10); | ||
491 | +} | ||
492 | + | ||
493 | +/* End of tests. */ | ||
494 | + | ||
495 | +int main(int argc, char **argv) | ||
496 | +{ | ||
497 | + init_clocks(); | ||
498 | + | ||
499 | + g_test_init(&argc, &argv, NULL); | ||
500 | + g_test_add_func("/aio/multi/lifecycle", test_lifecycle); | ||
501 | + if (g_test_quick()) { | ||
502 | + g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1); | ||
503 | + } else { | ||
504 | + g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10); | ||
505 | + } | ||
506 | + return g_test_run(); | ||
507 | +} | ||
508 | diff --git a/util/async.c b/util/async.c | ||
509 | index XXXXXXX..XXXXXXX 100644 | ||
510 | --- a/util/async.c | ||
511 | +++ b/util/async.c | ||
512 | @@ -XXX,XX +XXX,XX @@ | ||
513 | #include "qemu/main-loop.h" | ||
514 | #include "qemu/atomic.h" | ||
515 | #include "block/raw-aio.h" | ||
516 | +#include "qemu/coroutine_int.h" | ||
1275 | +#include "trace.h" | 517 | +#include "trace.h" |
1276 | +#include "ufs.h" | 518 | |
1277 | + | 519 | /***********************************************************/ |
1278 | +/* The QEMU-UFS device follows spec version 3.1 */ | 520 | /* bottom halves (can be seen as timers which expire ASAP) */ |
1279 | +#define UFS_SPEC_VER 0x00000310 | 521 | @@ -XXX,XX +XXX,XX @@ aio_ctx_finalize(GSource *source) |
1280 | +#define UFS_MAX_NUTRS 32 | 522 | } |
1281 | +#define UFS_MAX_NUTMRS 8 | 523 | #endif |
1282 | + | 524 | |
1283 | +static void ufs_irq_check(UfsHc *u) | 525 | + assert(QSLIST_EMPTY(&ctx->scheduled_coroutines)); |
1284 | +{ | 526 | + qemu_bh_delete(ctx->co_schedule_bh); |
1285 | + PCIDevice *pci = PCI_DEVICE(u); | 527 | + |
1286 | + | 528 | qemu_lockcnt_lock(&ctx->list_lock); |
1287 | + if ((u->reg.is & UFS_INTR_MASK) & u->reg.ie) { | 529 | assert(!qemu_lockcnt_count(&ctx->list_lock)); |
1288 | + trace_ufs_irq_raise(); | 530 | while (ctx->first_bh) { |
1289 | + pci_irq_assert(pci); | 531 | @@ -XXX,XX +XXX,XX @@ static bool event_notifier_poll(void *opaque) |
532 | return atomic_read(&ctx->notified); | ||
533 | } | ||
534 | |||
535 | +static void co_schedule_bh_cb(void *opaque) | ||
536 | +{ | ||
537 | + AioContext *ctx = opaque; | ||
538 | + QSLIST_HEAD(, Coroutine) straight, reversed; | ||
539 | + | ||
540 | + QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines); | ||
541 | + QSLIST_INIT(&straight); | ||
542 | + | ||
543 | + while (!QSLIST_EMPTY(&reversed)) { | ||
544 | + Coroutine *co = QSLIST_FIRST(&reversed); | ||
545 | + QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next); | ||
546 | + QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next); | ||
547 | + } | ||
548 | + | ||
549 | + while (!QSLIST_EMPTY(&straight)) { | ||
550 | + Coroutine *co = QSLIST_FIRST(&straight); | ||
551 | + QSLIST_REMOVE_HEAD(&straight, co_scheduled_next); | ||
552 | + trace_aio_co_schedule_bh_cb(ctx, co); | ||
553 | + qemu_coroutine_enter(co); | ||
554 | + } | ||
555 | +} | ||
556 | + | ||
557 | AioContext *aio_context_new(Error **errp) | ||
558 | { | ||
559 | int ret; | ||
560 | @@ -XXX,XX +XXX,XX @@ AioContext *aio_context_new(Error **errp) | ||
561 | } | ||
562 | g_source_set_can_recurse(&ctx->source, true); | ||
563 | qemu_lockcnt_init(&ctx->list_lock); | ||
564 | + | ||
565 | + ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx); | ||
566 | + QSLIST_INIT(&ctx->scheduled_coroutines); | ||
567 | + | ||
568 | aio_set_event_notifier(ctx, &ctx->notifier, | ||
569 | false, | ||
570 | (EventNotifierHandler *) | ||
571 | @@ -XXX,XX +XXX,XX @@ fail: | ||
572 | return NULL; | ||
573 | } | ||
574 | |||
575 | +void aio_co_schedule(AioContext *ctx, Coroutine *co) | ||
576 | +{ | ||
577 | + trace_aio_co_schedule(ctx, co); | ||
578 | + QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines, | ||
579 | + co, co_scheduled_next); | ||
580 | + qemu_bh_schedule(ctx->co_schedule_bh); | ||
581 | +} | ||
582 | + | ||
583 | +void aio_co_wake(struct Coroutine *co) | ||
584 | +{ | ||
585 | + AioContext *ctx; | ||
586 | + | ||
587 | + /* Read coroutine before co->ctx. Matches smp_wmb in | ||
588 | + * qemu_coroutine_enter. | ||
589 | + */ | ||
590 | + smp_read_barrier_depends(); | ||
591 | + ctx = atomic_read(&co->ctx); | ||
592 | + | ||
593 | + if (ctx != qemu_get_current_aio_context()) { | ||
594 | + aio_co_schedule(ctx, co); | ||
595 | + return; | ||
596 | + } | ||
597 | + | ||
598 | + if (qemu_in_coroutine()) { | ||
599 | + Coroutine *self = qemu_coroutine_self(); | ||
600 | + assert(self != co); | ||
601 | + QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next); | ||
1290 | + } else { | 602 | + } else { |
1291 | + trace_ufs_irq_lower(); | 603 | + aio_context_acquire(ctx); |
1292 | + pci_irq_deassert(pci); | 604 | + qemu_coroutine_enter(co); |
1293 | + } | 605 | + aio_context_release(ctx); |
1294 | +} | 606 | + } |
1295 | + | 607 | +} |
1296 | +static void ufs_process_uiccmd(UfsHc *u, uint32_t val) | 608 | + |
1297 | +{ | 609 | void aio_context_ref(AioContext *ctx) |
1298 | + trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2, | 610 | { |
1299 | + u->reg.ucmdarg3); | 611 | g_source_ref(&ctx->source); |
1300 | + /* | 612 | diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c |
1301 | + * Only the essential uic commands for running drivers on Linux and Windows | 613 | index XXXXXXX..XXXXXXX 100644 |
1302 | + * are implemented. | 614 | --- a/util/qemu-coroutine.c |
615 | +++ b/util/qemu-coroutine.c | ||
616 | @@ -XXX,XX +XXX,XX @@ | ||
617 | #include "qemu/atomic.h" | ||
618 | #include "qemu/coroutine.h" | ||
619 | #include "qemu/coroutine_int.h" | ||
620 | +#include "block/aio.h" | ||
621 | |||
622 | enum { | ||
623 | POOL_BATCH_SIZE = 64, | ||
624 | @@ -XXX,XX +XXX,XX @@ void qemu_coroutine_enter(Coroutine *co) | ||
625 | } | ||
626 | |||
627 | co->caller = self; | ||
628 | + co->ctx = qemu_get_current_aio_context(); | ||
629 | + | ||
630 | + /* Store co->ctx before anything that stores co. Matches | ||
631 | + * barrier in aio_co_wake. | ||
1303 | + */ | 632 | + */ |
1304 | + switch (val) { | 633 | + smp_wmb(); |
1305 | + case UFS_UIC_CMD_DME_LINK_STARTUP: | 634 | + |
1306 | + u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, DP, 1); | 635 | ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER); |
1307 | + u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTRLRDY, 1); | 636 | |
1308 | + u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UTMRLRDY, 1); | 637 | qemu_co_queue_run_restart(co); |
1309 | + u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS; | 638 | diff --git a/util/trace-events b/util/trace-events |
1310 | + break; | ||
1311 | + /* TODO: Revisit it when Power Management is implemented */ | ||
1312 | + case UFS_UIC_CMD_DME_HIBER_ENTER: | ||
1313 | + u->reg.is = FIELD_DP32(u->reg.is, IS, UHES, 1); | ||
1314 | + u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL); | ||
1315 | + u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS; | ||
1316 | + break; | ||
1317 | + case UFS_UIC_CMD_DME_HIBER_EXIT: | ||
1318 | + u->reg.is = FIELD_DP32(u->reg.is, IS, UHXS, 1); | ||
1319 | + u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UPMCRS, UFS_PWR_LOCAL); | ||
1320 | + u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_SUCCESS; | ||
1321 | + break; | ||
1322 | + default: | ||
1323 | + u->reg.ucmdarg2 = UFS_UIC_CMD_RESULT_FAILURE; | ||
1324 | + } | ||
1325 | + | ||
1326 | + u->reg.is = FIELD_DP32(u->reg.is, IS, UCCS, 1); | ||
1327 | + | ||
1328 | + ufs_irq_check(u); | ||
1329 | +} | ||
1330 | + | ||
1331 | +static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size) | ||
1332 | +{ | ||
1333 | + switch (offset) { | ||
1334 | + case A_IS: | ||
1335 | + u->reg.is &= ~data; | ||
1336 | + ufs_irq_check(u); | ||
1337 | + break; | ||
1338 | + case A_IE: | ||
1339 | + u->reg.ie = data; | ||
1340 | + ufs_irq_check(u); | ||
1341 | + break; | ||
1342 | + case A_HCE: | ||
1343 | + if (!FIELD_EX32(u->reg.hce, HCE, HCE) && FIELD_EX32(data, HCE, HCE)) { | ||
1344 | + u->reg.hcs = FIELD_DP32(u->reg.hcs, HCS, UCRDY, 1); | ||
1345 | + u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 1); | ||
1346 | + } else if (FIELD_EX32(u->reg.hce, HCE, HCE) && | ||
1347 | + !FIELD_EX32(data, HCE, HCE)) { | ||
1348 | + u->reg.hcs = 0; | ||
1349 | + u->reg.hce = FIELD_DP32(u->reg.hce, HCE, HCE, 0); | ||
1350 | + } | ||
1351 | + break; | ||
1352 | + case A_UTRLBA: | ||
1353 | + u->reg.utrlba = data & R_UTRLBA_UTRLBA_MASK; | ||
1354 | + break; | ||
1355 | + case A_UTRLBAU: | ||
1356 | + u->reg.utrlbau = data; | ||
1357 | + break; | ||
1358 | + case A_UTRLDBR: | ||
1359 | + /* Not yet supported */ | ||
1360 | + break; | ||
1361 | + case A_UTRLRSR: | ||
1362 | + u->reg.utrlrsr = data; | ||
1363 | + break; | ||
1364 | + case A_UTRLCNR: | ||
1365 | + u->reg.utrlcnr &= ~data; | ||
1366 | + break; | ||
1367 | + case A_UTMRLBA: | ||
1368 | + u->reg.utmrlba = data & R_UTMRLBA_UTMRLBA_MASK; | ||
1369 | + break; | ||
1370 | + case A_UTMRLBAU: | ||
1371 | + u->reg.utmrlbau = data; | ||
1372 | + break; | ||
1373 | + case A_UICCMD: | ||
1374 | + ufs_process_uiccmd(u, data); | ||
1375 | + break; | ||
1376 | + case A_UCMDARG1: | ||
1377 | + u->reg.ucmdarg1 = data; | ||
1378 | + break; | ||
1379 | + case A_UCMDARG2: | ||
1380 | + u->reg.ucmdarg2 = data; | ||
1381 | + break; | ||
1382 | + case A_UCMDARG3: | ||
1383 | + u->reg.ucmdarg3 = data; | ||
1384 | + break; | ||
1385 | + case A_UTRLCLR: | ||
1386 | + case A_UTMRLDBR: | ||
1387 | + case A_UTMRLCLR: | ||
1388 | + case A_UTMRLRSR: | ||
1389 | + trace_ufs_err_unsupport_register_offset(offset); | ||
1390 | + break; | ||
1391 | + default: | ||
1392 | + trace_ufs_err_invalid_register_offset(offset); | ||
1393 | + break; | ||
1394 | + } | ||
1395 | +} | ||
1396 | + | ||
1397 | +static uint64_t ufs_mmio_read(void *opaque, hwaddr addr, unsigned size) | ||
1398 | +{ | ||
1399 | + UfsHc *u = (UfsHc *)opaque; | ||
1400 | + uint8_t *ptr = (uint8_t *)&u->reg; | ||
1401 | + uint64_t value; | ||
1402 | + | ||
1403 | + if (addr > sizeof(u->reg) - size) { | ||
1404 | + trace_ufs_err_invalid_register_offset(addr); | ||
1405 | + return 0; | ||
1406 | + } | ||
1407 | + | ||
1408 | + value = *(uint32_t *)(ptr + addr); | ||
1409 | + trace_ufs_mmio_read(addr, value, size); | ||
1410 | + return value; | ||
1411 | +} | ||
1412 | + | ||
1413 | +static void ufs_mmio_write(void *opaque, hwaddr addr, uint64_t data, | ||
1414 | + unsigned size) | ||
1415 | +{ | ||
1416 | + UfsHc *u = (UfsHc *)opaque; | ||
1417 | + | ||
1418 | + if (addr > sizeof(u->reg) - size) { | ||
1419 | + trace_ufs_err_invalid_register_offset(addr); | ||
1420 | + return; | ||
1421 | + } | ||
1422 | + | ||
1423 | + trace_ufs_mmio_write(addr, data, size); | ||
1424 | + ufs_write_reg(u, addr, data, size); | ||
1425 | +} | ||
1426 | + | ||
1427 | +static const MemoryRegionOps ufs_mmio_ops = { | ||
1428 | + .read = ufs_mmio_read, | ||
1429 | + .write = ufs_mmio_write, | ||
1430 | + .endianness = DEVICE_LITTLE_ENDIAN, | ||
1431 | + .impl = { | ||
1432 | + .min_access_size = 4, | ||
1433 | + .max_access_size = 4, | ||
1434 | + }, | ||
1435 | +}; | ||
1436 | + | ||
1437 | +static bool ufs_check_constraints(UfsHc *u, Error **errp) | ||
1438 | +{ | ||
1439 | + if (u->params.nutrs > UFS_MAX_NUTRS) { | ||
1440 | + error_setg(errp, "nutrs must be less than or equal to %d", | ||
1441 | + UFS_MAX_NUTRS); | ||
1442 | + return false; | ||
1443 | + } | ||
1444 | + | ||
1445 | + if (u->params.nutmrs > UFS_MAX_NUTMRS) { | ||
1446 | + error_setg(errp, "nutmrs must be less than or equal to %d", | ||
1447 | + UFS_MAX_NUTMRS); | ||
1448 | + return false; | ||
1449 | + } | ||
1450 | + | ||
1451 | + return true; | ||
1452 | +} | ||
1453 | + | ||
1454 | +static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev) | ||
1455 | +{ | ||
1456 | + uint8_t *pci_conf = pci_dev->config; | ||
1457 | + | ||
1458 | + pci_conf[PCI_INTERRUPT_PIN] = 1; | ||
1459 | + pci_config_set_prog_interface(pci_conf, 0x1); | ||
1460 | + | ||
1461 | + memory_region_init_io(&u->iomem, OBJECT(u), &ufs_mmio_ops, u, "ufs", | ||
1462 | + u->reg_size); | ||
1463 | + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &u->iomem); | ||
1464 | + u->irq = pci_allocate_irq(pci_dev); | ||
1465 | +} | ||
1466 | + | ||
1467 | +static void ufs_init_hc(UfsHc *u) | ||
1468 | +{ | ||
1469 | + uint32_t cap = 0; | ||
1470 | + | ||
1471 | + u->reg_size = pow2ceil(sizeof(UfsReg)); | ||
1472 | + | ||
1473 | + memset(&u->reg, 0, sizeof(u->reg)); | ||
1474 | + cap = FIELD_DP32(cap, CAP, NUTRS, (u->params.nutrs - 1)); | ||
1475 | + cap = FIELD_DP32(cap, CAP, RTT, 2); | ||
1476 | + cap = FIELD_DP32(cap, CAP, NUTMRS, (u->params.nutmrs - 1)); | ||
1477 | + cap = FIELD_DP32(cap, CAP, AUTOH8, 0); | ||
1478 | + cap = FIELD_DP32(cap, CAP, 64AS, 1); | ||
1479 | + cap = FIELD_DP32(cap, CAP, OODDS, 0); | ||
1480 | + cap = FIELD_DP32(cap, CAP, UICDMETMS, 0); | ||
1481 | + cap = FIELD_DP32(cap, CAP, CS, 0); | ||
1482 | + u->reg.cap = cap; | ||
1483 | + u->reg.ver = UFS_SPEC_VER; | ||
1484 | +} | ||
1485 | + | ||
1486 | +static void ufs_realize(PCIDevice *pci_dev, Error **errp) | ||
1487 | +{ | ||
1488 | + UfsHc *u = UFS(pci_dev); | ||
1489 | + | ||
1490 | + if (!ufs_check_constraints(u, errp)) { | ||
1491 | + return; | ||
1492 | + } | ||
1493 | + | ||
1494 | + ufs_init_hc(u); | ||
1495 | + ufs_init_pci(u, pci_dev); | ||
1496 | +} | ||
1497 | + | ||
1498 | +static Property ufs_props[] = { | ||
1499 | + DEFINE_PROP_STRING("serial", UfsHc, params.serial), | ||
1500 | + DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32), | ||
1501 | + DEFINE_PROP_UINT8("nutmrs", UfsHc, params.nutmrs, 8), | ||
1502 | + DEFINE_PROP_END_OF_LIST(), | ||
1503 | +}; | ||
1504 | + | ||
1505 | +static const VMStateDescription ufs_vmstate = { | ||
1506 | + .name = "ufs", | ||
1507 | + .unmigratable = 1, | ||
1508 | +}; | ||
1509 | + | ||
1510 | +static void ufs_class_init(ObjectClass *oc, void *data) | ||
1511 | +{ | ||
1512 | + DeviceClass *dc = DEVICE_CLASS(oc); | ||
1513 | + PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); | ||
1514 | + | ||
1515 | + pc->realize = ufs_realize; | ||
1516 | + pc->vendor_id = PCI_VENDOR_ID_REDHAT; | ||
1517 | + pc->device_id = PCI_DEVICE_ID_REDHAT_UFS; | ||
1518 | + pc->class_id = PCI_CLASS_STORAGE_UFS; | ||
1519 | + | ||
1520 | + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); | ||
1521 | + dc->desc = "Universal Flash Storage"; | ||
1522 | + device_class_set_props(dc, ufs_props); | ||
1523 | + dc->vmsd = &ufs_vmstate; | ||
1524 | +} | ||
1525 | + | ||
1526 | +static const TypeInfo ufs_info = { | ||
1527 | + .name = TYPE_UFS, | ||
1528 | + .parent = TYPE_PCI_DEVICE, | ||
1529 | + .class_init = ufs_class_init, | ||
1530 | + .instance_size = sizeof(UfsHc), | ||
1531 | + .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} }, | ||
1532 | +}; | ||
1533 | + | ||
1534 | +static void ufs_register_types(void) | ||
1535 | +{ | ||
1536 | + type_register_static(&ufs_info); | ||
1537 | +} | ||
1538 | + | ||
1539 | +type_init(ufs_register_types) | ||
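
Assuming TYPE_UFS expands to "ufs" (it is defined in ufs.h, which this view does not show), the controller registered above can be smoke-tested from the command line roughly as follows; note that a usable logical unit and the qtests only arrive with the later patches in this series:

    qemu-system-x86_64 -M q35 -device ufs,serial=deadbeef,nutrs=32,nutmrs=8
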
1540 | diff --git a/hw/Kconfig b/hw/Kconfig | ||
1541 | index XXXXXXX..XXXXXXX 100644 | 639 | index XXXXXXX..XXXXXXX 100644 |
1542 | --- a/hw/Kconfig | 640 | --- a/util/trace-events |
1543 | +++ b/hw/Kconfig | 641 | +++ b/util/trace-events |
1544 | @@ -XXX,XX +XXX,XX @@ source smbios/Kconfig | 642 | @@ -XXX,XX +XXX,XX @@ run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d" |
1545 | source ssi/Kconfig | 643 | poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64 |
1546 | source timer/Kconfig | 644 | poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64 |
1547 | source tpm/Kconfig | 645 | |
1548 | +source ufs/Kconfig | 646 | +# util/async.c |
1549 | source usb/Kconfig | 647 | +aio_co_schedule(void *ctx, void *co) "ctx %p co %p" |
1550 | source virtio/Kconfig | 648 | +aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p" |
1551 | source vfio/Kconfig | 649 | + |
1552 | diff --git a/hw/meson.build b/hw/meson.build | 650 | # util/thread-pool.c |
1553 | index XXXXXXX..XXXXXXX 100644 | 651 | thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p" |
1554 | --- a/hw/meson.build | 652 | thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d" |
1555 | +++ b/hw/meson.build | ||
1556 | @@ -XXX,XX +XXX,XX @@ subdir('smbios') | ||
1557 | subdir('ssi') | ||
1558 | subdir('timer') | ||
1559 | subdir('tpm') | ||
1560 | +subdir('ufs') | ||
1561 | subdir('usb') | ||
1562 | subdir('vfio') | ||
1563 | subdir('virtio') | ||
1564 | diff --git a/hw/ufs/Kconfig b/hw/ufs/Kconfig | ||
1565 | new file mode 100644 | ||
1566 | index XXXXXXX..XXXXXXX | ||
1567 | --- /dev/null | ||
1568 | +++ b/hw/ufs/Kconfig | ||
1569 | @@ -XXX,XX +XXX,XX @@ | ||
1570 | +config UFS_PCI | ||
1571 | + bool | ||
1572 | + default y if PCI_DEVICES | ||
1573 | + depends on PCI | ||
1574 | diff --git a/hw/ufs/meson.build b/hw/ufs/meson.build | ||
1575 | new file mode 100644 | ||
1576 | index XXXXXXX..XXXXXXX | ||
1577 | --- /dev/null | ||
1578 | +++ b/hw/ufs/meson.build | ||
1579 | @@ -0,0 +1 @@ | ||
1580 | +system_ss.add(when: 'CONFIG_UFS_PCI', if_true: files('ufs.c')) | ||
1581 | diff --git a/hw/ufs/trace-events b/hw/ufs/trace-events | ||
1582 | new file mode 100644 | ||
1583 | index XXXXXXX..XXXXXXX | ||
1584 | --- /dev/null | ||
1585 | +++ b/hw/ufs/trace-events | ||
1586 | @@ -XXX,XX +XXX,XX @@ | ||
1587 | +# ufs.c | ||
1588 | +ufs_irq_raise(void) "INTx" | ||
1589 | +ufs_irq_lower(void) "INTx" | ||
1590 | +ufs_mmio_read(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d" | ||
1591 | +ufs_mmio_write(uint64_t addr, uint64_t data, unsigned size) "addr 0x%"PRIx64" data 0x%"PRIx64" size %d" | ||
1592 | +ufs_process_db(uint32_t slot) "UTRLDBR slot %"PRIu32"" | ||
1593 | +ufs_process_req(uint32_t slot) "UTRLDBR slot %"PRIu32"" | ||
1594 | +ufs_complete_req(uint32_t slot) "UTRLDBR slot %"PRIu32"" | ||
1595 | +ufs_sendback_req(uint32_t slot) "UTRLDBR slot %"PRIu32"" | ||
1596 | +ufs_exec_nop_cmd(uint32_t slot) "UTRLDBR slot %"PRIu32"" | ||
1597 | +ufs_exec_scsi_cmd(uint32_t slot, uint8_t lun, uint8_t opcode) "slot %"PRIu32", lun 0x%"PRIx8", opcode 0x%"PRIx8"" | ||
1598 | +ufs_exec_query_cmd(uint32_t slot, uint8_t opcode) "slot %"PRIu32", opcode 0x%"PRIx8"" | ||
1599 | +ufs_process_uiccmd(uint32_t uiccmd, uint32_t ucmdarg1, uint32_t ucmdarg2, uint32_t ucmdarg3) "uiccmd 0x%"PRIx32", ucmdarg1 0x%"PRIx32", ucmdarg2 0x%"PRIx32", ucmdarg3 0x%"PRIx32"" | ||
1600 | + | ||
1601 | +# error condition | ||
1602 | +ufs_err_dma_read_utrd(uint32_t slot, uint64_t addr) "failed to read utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64"" | ||
1603 | +ufs_err_dma_read_req_upiu(uint32_t slot, uint64_t addr) "failed to read req upiu. UTRLDBR slot %"PRIu32", request upiu addr %"PRIu64"" | ||
1604 | +ufs_err_dma_read_prdt(uint32_t slot, uint64_t addr) "failed to read prdt. UTRLDBR slot %"PRIu32", prdt addr %"PRIu64"" | ||
1605 | +ufs_err_dma_write_utrd(uint32_t slot, uint64_t addr) "failed to write utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64"" | ||
1606 | +ufs_err_dma_write_rsp_upiu(uint32_t slot, uint64_t addr) "failed to write rsp upiu. UTRLDBR slot %"PRIu32", response upiu addr %"PRIu64"" | ||
1607 | +ufs_err_utrl_slot_busy(uint32_t slot) "UTRLDBR slot %"PRIu32" is busy" | ||
1608 | +ufs_err_unsupport_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is not yet supported" | ||
1609 | +ufs_err_invalid_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is invalid" | ||
1610 | +ufs_err_scsi_cmd_invalid_lun(uint8_t lun) "scsi command has invalid lun: 0x%"PRIx8"" | ||
1611 | +ufs_err_query_flag_not_readable(uint8_t idn) "query flag idn 0x%"PRIx8" is not readable" | ||
1612 | +ufs_err_query_flag_not_writable(uint8_t idn) "query flag idn 0x%"PRIx8" is not writable" | ||
1613 | +ufs_err_query_attr_not_readable(uint8_t idn) "query attribute idn 0x%"PRIx8" is not readable" | ||
1614 | +ufs_err_query_attr_not_writable(uint8_t idn) "query attribute idn 0x%"PRIx8" is not writable" | ||
1615 | +ufs_err_query_invalid_opcode(uint8_t opcode) "query request has invalid opcode. opcode: 0x%"PRIx8"" | ||
1616 | +ufs_err_query_invalid_idn(uint8_t opcode, uint8_t idn) "query request has invalid idn. opcode: 0x%"PRIx8", idn 0x%"PRIx8"" | ||
1617 | +ufs_err_query_invalid_index(uint8_t opcode, uint8_t index) "query request has invalid index. opcode: 0x%"PRIx8", index 0x%"PRIx8"" | ||
1618 | +ufs_err_invalid_trans_code(uint32_t slot, uint8_t trans_code) "request upiu has invalid transaction code. slot: %"PRIu32", trans_code: 0x%"PRIx8"" | ||
1619 | -- | 653 | -- |
1620 | 2.41.0 | 654 | 2.9.3 |
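
Before moving on: the aio_co_schedule()/aio_co_wake() pair added to util/async.c above is the primitive the rest of this series builds on. An illustrative usage sketch (start_worker/wake_worker are placeholder names, not QEMU functions):

/* Sketch: the first entry goes through a bottom half on the chosen
 * AioContext, which also records co->ctx for later wakeups. */
static void start_worker(AioContext *target, Coroutine *co)
{
    aio_co_schedule(target, co);
}

/* Sketch: resume a yielded coroutine from any thread. If the caller
 * runs in a different AioContext, aio_co_wake() degrades to
 * aio_co_schedule(co->ctx, co), so the coroutine always restarts on
 * its home context. */
static void wake_worker(Coroutine *co)
{
    aio_co_wake(co);
}
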
655 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | qcow2_create2 calls this function (blk_prw). Do not run a nested event loop, as that | ||
4 | breaks when aio_co_wake tries to queue the coroutine on the co_queue_wakeup | ||
5 | list of the currently running one. | ||
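
In other words, a synchronous wrapper needs a coroutine fast path. A minimal sketch of the pattern (my_sync_rw/my_co_entry/MyRwCo are placeholder names; the actual change to blk_prw() is in the diff below):

static int my_sync_rw(BlockBackend *blk, MyRwCo *rwco)
{
    if (qemu_in_coroutine()) {
        /* Fast path: already in coroutine context, call directly
         * instead of nesting an event loop. */
        my_co_entry(rwco);
    } else {
        /* Slow path: spawn a coroutine and poll for completion. */
        Coroutine *co = qemu_coroutine_create(my_co_entry, rwco);
        qemu_coroutine_enter(co);
        BDRV_POLL_WHILE(blk_bs(blk), rwco->ret == NOT_DONE);
    }
    return rwco->ret;
}
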
6 | |||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
10 | Message-id: 20170213135235.12274-4-pbonzini@redhat.com | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/block-backend.c | 12 ++++++++---- | ||
14 | 1 file changed, 8 insertions(+), 4 deletions(-) | ||
15 | |||
16 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
17 | index XXXXXXX..XXXXXXX 100644 | ||
18 | --- a/block/block-backend.c | ||
19 | +++ b/block/block-backend.c | ||
20 | @@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, | ||
21 | { | ||
22 | QEMUIOVector qiov; | ||
23 | struct iovec iov; | ||
24 | - Coroutine *co; | ||
25 | BlkRwCo rwco; | ||
26 | |||
27 | iov = (struct iovec) { | ||
28 | @@ -XXX,XX +XXX,XX @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, | ||
29 | .ret = NOT_DONE, | ||
30 | }; | ||
31 | |||
32 | - co = qemu_coroutine_create(co_entry, &rwco); | ||
33 | - qemu_coroutine_enter(co); | ||
34 | - BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); | ||
35 | + if (qemu_in_coroutine()) { | ||
36 | + /* Fast-path if already in coroutine context */ | ||
37 | + co_entry(&rwco); | ||
38 | + } else { | ||
39 | + Coroutine *co = qemu_coroutine_create(co_entry, &rwco); | ||
40 | + qemu_coroutine_enter(co); | ||
41 | + BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); | ||
42 | + } | ||
43 | |||
44 | return rwco.ret; | ||
45 | } | ||
46 | -- | ||
47 | 2.9.3 | ||
48 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Once the thread pool starts using aio_co_wake, it will also need | ||
4 | qemu_get_current_aio_context(). Make test-thread-pool create | ||
5 | an AioContext with qemu_init_main_loop, so that stubs/iothread.c | ||
6 | and tests/iothread.c can provide the rest. | ||
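
Condensed from the diff below, the test's initialization after this patch looks like this (ctx and pool are the test's file-scope statics):

int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_abort);      /* creates the main AioContext */
    ctx = qemu_get_current_aio_context();   /* now valid in the test */
    pool = aio_get_thread_pool(ctx);
    /* ... g_test_init() and test registration are unchanged ... */
    return g_test_run();
}
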
7 | |||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
11 | Message-id: 20170213135235.12274-5-pbonzini@redhat.com | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | --- | ||
14 | tests/test-thread-pool.c | 12 +++--------- | ||
15 | 1 file changed, 3 insertions(+), 9 deletions(-) | ||
16 | |||
17 | diff --git a/tests/test-thread-pool.c b/tests/test-thread-pool.c | ||
18 | index XXXXXXX..XXXXXXX 100644 | ||
19 | --- a/tests/test-thread-pool.c | ||
20 | +++ b/tests/test-thread-pool.c | ||
21 | @@ -XXX,XX +XXX,XX @@ | ||
22 | #include "qapi/error.h" | ||
23 | #include "qemu/timer.h" | ||
24 | #include "qemu/error-report.h" | ||
25 | +#include "qemu/main-loop.h" | ||
26 | |||
27 | static AioContext *ctx; | ||
28 | static ThreadPool *pool; | ||
29 | @@ -XXX,XX +XXX,XX @@ static void test_cancel_async(void) | ||
30 | int main(int argc, char **argv) | ||
31 | { | ||
32 | int ret; | ||
33 | - Error *local_error = NULL; | ||
34 | |||
35 | - init_clocks(); | ||
36 | - | ||
37 | - ctx = aio_context_new(&local_error); | ||
38 | - if (!ctx) { | ||
39 | - error_reportf_err(local_error, "Failed to create AIO Context: "); | ||
40 | - exit(1); | ||
41 | - } | ||
42 | + qemu_init_main_loop(&error_abort); | ||
43 | + ctx = qemu_get_current_aio_context(); | ||
44 | pool = aio_get_thread_pool(ctx); | ||
45 | |||
46 | g_test_init(&argc, &argv, NULL); | ||
47 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
48 | |||
49 | ret = g_test_run(); | ||
50 | |||
51 | - aio_context_unref(ctx); | ||
52 | return ret; | ||
53 | } | ||
54 | -- | ||
55 | 2.9.3 | ||
56 | |||
New patch | |||
---|---|---|---|
1 | 1 | From: Paolo Bonzini <pbonzini@redhat.com> | |
2 | |||
3 | This is in preparation for making qio_channel_yield work on | ||
4 | AioContexts other than the main one. | ||
5 | |||
6 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
9 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
10 | Message-id: 20170213135235.12274-6-pbonzini@redhat.com | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | include/io/channel.h | 25 +++++++++++++++++++++++++ | ||
14 | io/channel-command.c | 13 +++++++++++++ | ||
15 | io/channel-file.c | 11 +++++++++++ | ||
16 | io/channel-socket.c | 16 +++++++++++----- | ||
17 | io/channel-tls.c | 12 ++++++++++++ | ||
18 | io/channel-watch.c | 6 ++++++ | ||
19 | io/channel.c | 11 +++++++++++ | ||
20 | 7 files changed, 89 insertions(+), 5 deletions(-) | ||
21 | |||
22 | diff --git a/include/io/channel.h b/include/io/channel.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/include/io/channel.h | ||
25 | +++ b/include/io/channel.h | ||
26 | @@ -XXX,XX +XXX,XX @@ | ||
27 | |||
28 | #include "qemu-common.h" | ||
29 | #include "qom/object.h" | ||
30 | +#include "block/aio.h" | ||
31 | |||
32 | #define TYPE_QIO_CHANNEL "qio-channel" | ||
33 | #define QIO_CHANNEL(obj) \ | ||
34 | @@ -XXX,XX +XXX,XX @@ struct QIOChannelClass { | ||
35 | off_t offset, | ||
36 | int whence, | ||
37 | Error **errp); | ||
38 | + void (*io_set_aio_fd_handler)(QIOChannel *ioc, | ||
39 | + AioContext *ctx, | ||
40 | + IOHandler *io_read, | ||
41 | + IOHandler *io_write, | ||
42 | + void *opaque); | ||
43 | }; | ||
44 | |||
45 | /* General I/O handling functions */ | ||
46 | @@ -XXX,XX +XXX,XX @@ void qio_channel_yield(QIOChannel *ioc, | ||
47 | void qio_channel_wait(QIOChannel *ioc, | ||
48 | GIOCondition condition); | ||
49 | |||
50 | +/** | ||
51 | + * qio_channel_set_aio_fd_handler: | ||
52 | + * @ioc: the channel object | ||
53 | + * @ctx: the AioContext to set the handlers on | ||
54 | + * @io_read: the read handler | ||
55 | + * @io_write: the write handler | ||
56 | + * @opaque: the opaque value passed to the handler | ||
57 | + * | ||
58 | + * This is used internally by qio_channel_yield(). It can | ||
59 | + * be used by channel implementations to forward the handlers | ||
60 | + * to another channel (e.g. from #QIOChannelTLS to the | ||
61 | + * underlying socket). | ||
62 | + */ | ||
63 | +void qio_channel_set_aio_fd_handler(QIOChannel *ioc, | ||
64 | + AioContext *ctx, | ||
65 | + IOHandler *io_read, | ||
66 | + IOHandler *io_write, | ||
67 | + void *opaque); | ||
68 | + | ||
69 | #endif /* QIO_CHANNEL_H */ | ||
70 | diff --git a/io/channel-command.c b/io/channel-command.c | ||
71 | index XXXXXXX..XXXXXXX 100644 | ||
72 | --- a/io/channel-command.c | ||
73 | +++ b/io/channel-command.c | ||
74 | @@ -XXX,XX +XXX,XX @@ static int qio_channel_command_close(QIOChannel *ioc, | ||
75 | } | ||
76 | |||
77 | |||
78 | +static void qio_channel_command_set_aio_fd_handler(QIOChannel *ioc, | ||
79 | + AioContext *ctx, | ||
80 | + IOHandler *io_read, | ||
81 | + IOHandler *io_write, | ||
82 | + void *opaque) | ||
83 | +{ | ||
84 | + QIOChannelCommand *cioc = QIO_CHANNEL_COMMAND(ioc); | ||
85 | + aio_set_fd_handler(ctx, cioc->readfd, false, io_read, NULL, NULL, opaque); | ||
86 | + aio_set_fd_handler(ctx, cioc->writefd, false, NULL, io_write, NULL, opaque); | ||
87 | +} | ||
88 | + | ||
89 | + | ||
90 | static GSource *qio_channel_command_create_watch(QIOChannel *ioc, | ||
91 | GIOCondition condition) | ||
92 | { | ||
93 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_command_class_init(ObjectClass *klass, | ||
94 | ioc_klass->io_set_blocking = qio_channel_command_set_blocking; | ||
95 | ioc_klass->io_close = qio_channel_command_close; | ||
96 | ioc_klass->io_create_watch = qio_channel_command_create_watch; | ||
97 | + ioc_klass->io_set_aio_fd_handler = qio_channel_command_set_aio_fd_handler; | ||
98 | } | ||
99 | |||
100 | static const TypeInfo qio_channel_command_info = { | ||
101 | diff --git a/io/channel-file.c b/io/channel-file.c | ||
102 | index XXXXXXX..XXXXXXX 100644 | ||
103 | --- a/io/channel-file.c | ||
104 | +++ b/io/channel-file.c | ||
105 | @@ -XXX,XX +XXX,XX @@ static int qio_channel_file_close(QIOChannel *ioc, | ||
106 | } | ||
107 | |||
108 | |||
109 | +static void qio_channel_file_set_aio_fd_handler(QIOChannel *ioc, | ||
110 | + AioContext *ctx, | ||
111 | + IOHandler *io_read, | ||
112 | + IOHandler *io_write, | ||
113 | + void *opaque) | ||
114 | +{ | ||
115 | + QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); | ||
116 | + aio_set_fd_handler(ctx, fioc->fd, false, io_read, io_write, NULL, opaque); | ||
117 | +} | ||
118 | + | ||
119 | static GSource *qio_channel_file_create_watch(QIOChannel *ioc, | ||
120 | GIOCondition condition) | ||
121 | { | ||
122 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_file_class_init(ObjectClass *klass, | ||
123 | ioc_klass->io_seek = qio_channel_file_seek; | ||
124 | ioc_klass->io_close = qio_channel_file_close; | ||
125 | ioc_klass->io_create_watch = qio_channel_file_create_watch; | ||
126 | + ioc_klass->io_set_aio_fd_handler = qio_channel_file_set_aio_fd_handler; | ||
127 | } | ||
128 | |||
129 | static const TypeInfo qio_channel_file_info = { | ||
130 | diff --git a/io/channel-socket.c b/io/channel-socket.c | ||
131 | index XXXXXXX..XXXXXXX 100644 | ||
132 | --- a/io/channel-socket.c | ||
133 | +++ b/io/channel-socket.c | ||
134 | @@ -XXX,XX +XXX,XX @@ qio_channel_socket_set_blocking(QIOChannel *ioc, | ||
135 | qemu_set_block(sioc->fd); | ||
136 | } else { | ||
137 | qemu_set_nonblock(sioc->fd); | ||
138 | -#ifdef WIN32 | ||
139 | - WSAEventSelect(sioc->fd, ioc->event, | ||
140 | - FD_READ | FD_ACCEPT | FD_CLOSE | | ||
141 | - FD_CONNECT | FD_WRITE | FD_OOB); | ||
142 | -#endif | ||
143 | } | ||
144 | return 0; | ||
145 | } | ||
146 | @@ -XXX,XX +XXX,XX @@ qio_channel_socket_shutdown(QIOChannel *ioc, | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | +static void qio_channel_socket_set_aio_fd_handler(QIOChannel *ioc, | ||
151 | + AioContext *ctx, | ||
152 | + IOHandler *io_read, | ||
153 | + IOHandler *io_write, | ||
154 | + void *opaque) | ||
155 | +{ | ||
156 | + QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); | ||
157 | + aio_set_fd_handler(ctx, sioc->fd, false, io_read, io_write, NULL, opaque); | ||
158 | +} | ||
159 | + | ||
160 | static GSource *qio_channel_socket_create_watch(QIOChannel *ioc, | ||
161 | GIOCondition condition) | ||
162 | { | ||
163 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_socket_class_init(ObjectClass *klass, | ||
164 | ioc_klass->io_set_cork = qio_channel_socket_set_cork; | ||
165 | ioc_klass->io_set_delay = qio_channel_socket_set_delay; | ||
166 | ioc_klass->io_create_watch = qio_channel_socket_create_watch; | ||
167 | + ioc_klass->io_set_aio_fd_handler = qio_channel_socket_set_aio_fd_handler; | ||
168 | } | ||
169 | |||
170 | static const TypeInfo qio_channel_socket_info = { | ||
171 | diff --git a/io/channel-tls.c b/io/channel-tls.c | ||
172 | index XXXXXXX..XXXXXXX 100644 | ||
173 | --- a/io/channel-tls.c | ||
174 | +++ b/io/channel-tls.c | ||
175 | @@ -XXX,XX +XXX,XX @@ static int qio_channel_tls_close(QIOChannel *ioc, | ||
176 | return qio_channel_close(tioc->master, errp); | ||
177 | } | ||
178 | |||
179 | +static void qio_channel_tls_set_aio_fd_handler(QIOChannel *ioc, | ||
180 | + AioContext *ctx, | ||
181 | + IOHandler *io_read, | ||
182 | + IOHandler *io_write, | ||
183 | + void *opaque) | ||
184 | +{ | ||
185 | + QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc); | ||
186 | + | ||
187 | + qio_channel_set_aio_fd_handler(tioc->master, ctx, io_read, io_write, opaque); | ||
188 | +} | ||
189 | + | ||
190 | static GSource *qio_channel_tls_create_watch(QIOChannel *ioc, | ||
191 | GIOCondition condition) | ||
192 | { | ||
193 | @@ -XXX,XX +XXX,XX @@ static void qio_channel_tls_class_init(ObjectClass *klass, | ||
194 | ioc_klass->io_close = qio_channel_tls_close; | ||
195 | ioc_klass->io_shutdown = qio_channel_tls_shutdown; | ||
196 | ioc_klass->io_create_watch = qio_channel_tls_create_watch; | ||
197 | + ioc_klass->io_set_aio_fd_handler = qio_channel_tls_set_aio_fd_handler; | ||
198 | } | ||
199 | |||
200 | static const TypeInfo qio_channel_tls_info = { | ||
201 | diff --git a/io/channel-watch.c b/io/channel-watch.c | ||
202 | index XXXXXXX..XXXXXXX 100644 | ||
203 | --- a/io/channel-watch.c | ||
204 | +++ b/io/channel-watch.c | ||
205 | @@ -XXX,XX +XXX,XX @@ GSource *qio_channel_create_socket_watch(QIOChannel *ioc, | ||
206 | GSource *source; | ||
207 | QIOChannelSocketSource *ssource; | ||
208 | |||
209 | +#ifdef WIN32 | ||
210 | + WSAEventSelect(socket, ioc->event, | ||
211 | + FD_READ | FD_ACCEPT | FD_CLOSE | | ||
212 | + FD_CONNECT | FD_WRITE | FD_OOB); | ||
213 | +#endif | ||
214 | + | ||
215 | source = g_source_new(&qio_channel_socket_source_funcs, | ||
216 | sizeof(QIOChannelSocketSource)); | ||
217 | ssource = (QIOChannelSocketSource *)source; | ||
218 | diff --git a/io/channel.c b/io/channel.c | ||
219 | index XXXXXXX..XXXXXXX 100644 | ||
220 | --- a/io/channel.c | ||
221 | +++ b/io/channel.c | ||
222 | @@ -XXX,XX +XXX,XX @@ GSource *qio_channel_create_watch(QIOChannel *ioc, | ||
223 | } | ||
224 | |||
225 | |||
226 | +void qio_channel_set_aio_fd_handler(QIOChannel *ioc, | ||
227 | + AioContext *ctx, | ||
228 | + IOHandler *io_read, | ||
229 | + IOHandler *io_write, | ||
230 | + void *opaque) | ||
231 | +{ | ||
232 | + QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); | ||
233 | + | ||
234 | + klass->io_set_aio_fd_handler(ioc, ctx, io_read, io_write, opaque); | ||
235 | +} | ||
236 | + | ||
237 | guint qio_channel_add_watch(QIOChannel *ioc, | ||
238 | GIOCondition condition, | ||
239 | QIOChannelFunc func, | ||
240 | -- | ||
241 | 2.9.3 | ||
242 | |||
New patch | |||
---|---|---|---|
1 | 1 | From: Paolo Bonzini <pbonzini@redhat.com> | |
2 | |||
3 | Support separate coroutines for reading and writing, and place the | ||
4 | read/write handlers on the AioContext that the QIOChannel is registered | ||
5 | with. | ||
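
Sketched usage of the result (reader_co/writer_co are illustrative names; as the updated documentation below spells out, at most one coroutine may wait per condition and channel):

static coroutine_fn void reader_co(void *opaque)
{
    QIOChannel *ioc = opaque;

    /* Parks this coroutine as ioc->read_coroutine until the channel
     * becomes readable on the attached AioContext. */
    qio_channel_yield(ioc, G_IO_IN);
    /* ... now read from ioc ... */
}

static coroutine_fn void writer_co(void *opaque)
{
    QIOChannel *ioc = opaque;

    /* Independent of the reader: parks as ioc->write_coroutine. */
    qio_channel_yield(ioc, G_IO_OUT);
    /* ... now write to ioc ... */
}
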
6 | |||
7 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
8 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
11 | Message-id: 20170213135235.12274-7-pbonzini@redhat.com | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | --- | ||
14 | include/io/channel.h | 47 ++++++++++++++++++++++++++-- | ||
15 | io/channel.c | 86 +++++++++++++++++++++++++++++++++++++++------------- | ||
16 | 2 files changed, 109 insertions(+), 24 deletions(-) | ||
17 | |||
18 | diff --git a/include/io/channel.h b/include/io/channel.h | ||
19 | index XXXXXXX..XXXXXXX 100644 | ||
20 | --- a/include/io/channel.h | ||
21 | +++ b/include/io/channel.h | ||
22 | @@ -XXX,XX +XXX,XX @@ | ||
23 | |||
24 | #include "qemu-common.h" | ||
25 | #include "qom/object.h" | ||
26 | +#include "qemu/coroutine.h" | ||
27 | #include "block/aio.h" | ||
28 | |||
29 | #define TYPE_QIO_CHANNEL "qio-channel" | ||
30 | @@ -XXX,XX +XXX,XX @@ struct QIOChannel { | ||
31 | Object parent; | ||
32 | unsigned int features; /* bitmask of QIOChannelFeatures */ | ||
33 | char *name; | ||
34 | + AioContext *ctx; | ||
35 | + Coroutine *read_coroutine; | ||
36 | + Coroutine *write_coroutine; | ||
37 | #ifdef _WIN32 | ||
38 | HANDLE event; /* For use with GSource on Win32 */ | ||
39 | #endif | ||
40 | @@ -XXX,XX +XXX,XX @@ guint qio_channel_add_watch(QIOChannel *ioc, | ||
41 | |||
42 | |||
43 | /** | ||
44 | + * qio_channel_attach_aio_context: | ||
45 | + * @ioc: the channel object | ||
46 | + * @ctx: the #AioContext to set the handlers on | ||
47 | + * | ||
48 | + * Request that qio_channel_yield() sets I/O handlers on | ||
49 | + * the given #AioContext. If @ctx is %NULL, qio_channel_yield() | ||
50 | + * uses QEMU's main thread event loop. | ||
51 | + * | ||
52 | + * You can move a #QIOChannel from one #AioContext to another even if | ||
53 | + * I/O handlers are set for a coroutine. However, #QIOChannel provides | ||
54 | + * no synchronization between the calls to qio_channel_yield() and | ||
55 | + * qio_channel_attach_aio_context(). | ||
56 | + * | ||
57 | + * Therefore you should first call qio_channel_detach_aio_context() | ||
58 | + * to ensure that the coroutine is not entered concurrently. Then, | ||
59 | + * while the coroutine has yielded, call qio_channel_attach_aio_context(), | ||
60 | + * and then aio_co_schedule() to place the coroutine on the new | ||
61 | + * #AioContext. The calls to qio_channel_detach_aio_context() | ||
62 | + * and qio_channel_attach_aio_context() should be protected with | ||
63 | + * aio_context_acquire() and aio_context_release(). | ||
64 | + */ | ||
65 | +void qio_channel_attach_aio_context(QIOChannel *ioc, | ||
66 | + AioContext *ctx); | ||
67 | + | ||
68 | +/** | ||
69 | + * qio_channel_detach_aio_context: | ||
70 | + * @ioc: the channel object | ||
71 | + * | ||
72 | + * Disable any I/O handlers set by qio_channel_yield(). With the | ||
73 | + * help of aio_co_schedule(), this allows moving a coroutine that was | ||
74 | + * paused by qio_channel_yield() to another context. | ||
75 | + */ | ||
76 | +void qio_channel_detach_aio_context(QIOChannel *ioc); | ||
77 | + | ||
78 | +/** | ||
79 | * qio_channel_yield: | ||
80 | * @ioc: the channel object | ||
81 | * @condition: the I/O condition to wait for | ||
82 | * | ||
83 | - * Yields execution from the current coroutine until | ||
84 | - * the condition indicated by @condition becomes | ||
85 | - * available. | ||
86 | + * Yields execution from the current coroutine until the condition | ||
87 | + * indicated by @condition becomes available. @condition must | ||
88 | + * be either %G_IO_IN or %G_IO_OUT; it cannot contain both. In | ||
89 | + * addition, no two coroutines can be waiting on the same condition | ||
90 | + * and channel at the same time. | ||
91 | * | ||
92 | * This must only be called from coroutine context | ||
93 | */ | ||
94 | diff --git a/io/channel.c b/io/channel.c | ||
95 | index XXXXXXX..XXXXXXX 100644 | ||
96 | --- a/io/channel.c | ||
97 | +++ b/io/channel.c | ||
98 | @@ -XXX,XX +XXX,XX @@ | ||
99 | #include "qemu/osdep.h" | ||
100 | #include "io/channel.h" | ||
101 | #include "qapi/error.h" | ||
102 | -#include "qemu/coroutine.h" | ||
103 | +#include "qemu/main-loop.h" | ||
104 | |||
105 | bool qio_channel_has_feature(QIOChannel *ioc, | ||
106 | QIOChannelFeature feature) | ||
107 | @@ -XXX,XX +XXX,XX @@ off_t qio_channel_io_seek(QIOChannel *ioc, | ||
108 | } | ||
109 | |||
110 | |||
111 | -typedef struct QIOChannelYieldData QIOChannelYieldData; | ||
112 | -struct QIOChannelYieldData { | ||
113 | - QIOChannel *ioc; | ||
114 | - Coroutine *co; | ||
115 | -}; | ||
116 | +static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc); | ||
117 | |||
118 | +static void qio_channel_restart_read(void *opaque) | ||
119 | +{ | ||
120 | + QIOChannel *ioc = opaque; | ||
121 | + Coroutine *co = ioc->read_coroutine; | ||
122 | + | ||
123 | + ioc->read_coroutine = NULL; | ||
124 | + qio_channel_set_aio_fd_handlers(ioc); | ||
125 | + aio_co_wake(co); | ||
126 | +} | ||
127 | |||
128 | -static gboolean qio_channel_yield_enter(QIOChannel *ioc, | ||
129 | - GIOCondition condition, | ||
130 | - gpointer opaque) | ||
131 | +static void qio_channel_restart_write(void *opaque) | ||
132 | { | ||
133 | - QIOChannelYieldData *data = opaque; | ||
134 | - qemu_coroutine_enter(data->co); | ||
135 | - return FALSE; | ||
136 | + QIOChannel *ioc = opaque; | ||
137 | + Coroutine *co = ioc->write_coroutine; | ||
138 | + | ||
139 | + ioc->write_coroutine = NULL; | ||
140 | + qio_channel_set_aio_fd_handlers(ioc); | ||
141 | + aio_co_wake(co); | ||
142 | } | ||
143 | |||
144 | +static void qio_channel_set_aio_fd_handlers(QIOChannel *ioc) | ||
145 | +{ | ||
146 | + IOHandler *rd_handler = NULL, *wr_handler = NULL; | ||
147 | + AioContext *ctx; | ||
148 | + | ||
149 | + if (ioc->read_coroutine) { | ||
150 | + rd_handler = qio_channel_restart_read; | ||
151 | + } | ||
152 | + if (ioc->write_coroutine) { | ||
153 | + wr_handler = qio_channel_restart_write; | ||
154 | + } | ||
155 | + | ||
156 | + ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context(); | ||
157 | + qio_channel_set_aio_fd_handler(ioc, ctx, rd_handler, wr_handler, ioc); | ||
158 | +} | ||
159 | + | ||
160 | +void qio_channel_attach_aio_context(QIOChannel *ioc, | ||
161 | + AioContext *ctx) | ||
162 | +{ | ||
163 | + AioContext *old_ctx; | ||
164 | + if (ioc->ctx == ctx) { | ||
165 | + return; | ||
166 | + } | ||
167 | + | ||
168 | + old_ctx = ioc->ctx ? ioc->ctx : iohandler_get_aio_context(); | ||
169 | + qio_channel_set_aio_fd_handler(ioc, old_ctx, NULL, NULL, NULL); | ||
170 | + ioc->ctx = ctx; | ||
171 | + qio_channel_set_aio_fd_handlers(ioc); | ||
172 | +} | ||
173 | + | ||
174 | +void qio_channel_detach_aio_context(QIOChannel *ioc) | ||
175 | +{ | ||
176 | + ioc->read_coroutine = NULL; | ||
177 | + ioc->write_coroutine = NULL; | ||
178 | + qio_channel_set_aio_fd_handlers(ioc); | ||
179 | + ioc->ctx = NULL; | ||
180 | +} | ||
181 | |||
182 | void coroutine_fn qio_channel_yield(QIOChannel *ioc, | ||
183 | GIOCondition condition) | ||
184 | { | ||
185 | - QIOChannelYieldData data; | ||
186 | - | ||
187 | assert(qemu_in_coroutine()); | ||
188 | - data.ioc = ioc; | ||
189 | - data.co = qemu_coroutine_self(); | ||
190 | - qio_channel_add_watch(ioc, | ||
191 | - condition, | ||
192 | - qio_channel_yield_enter, | ||
193 | - &data, | ||
194 | - NULL); | ||
195 | + if (condition == G_IO_IN) { | ||
196 | + assert(!ioc->read_coroutine); | ||
197 | + ioc->read_coroutine = qemu_coroutine_self(); | ||
198 | + } else if (condition == G_IO_OUT) { | ||
199 | + assert(!ioc->write_coroutine); | ||
200 | + ioc->write_coroutine = qemu_coroutine_self(); | ||
201 | + } else { | ||
202 | + abort(); | ||
203 | + } | ||
204 | + qio_channel_set_aio_fd_handlers(ioc); | ||
205 | qemu_coroutine_yield(); | ||
206 | } | ||
207 | |||
208 | -- | ||
209 | 2.9.3 | ||
210 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | In the client, read the reply headers from a coroutine, switching the | ||
4 | read side between the "read header" coroutine and the I/O coroutine that | ||
5 | reads the body of the reply. | ||
6 | |||
7 | In the server, if the server can read more requests, it will create a new | ||
8 | "read request" coroutine as soon as a request has been read. Otherwise, | ||
9 | the new coroutine is created in nbd_request_put. | ||
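
Condensed from the client-side diff below, the handoff is a ping-pong between the single reply-header coroutine and the per-request coroutine (index computation and error handling omitted):

/* nbd_read_reply_entry, the single header reader: */
for (;;) {
    nbd_receive_reply(s->ioc, &s->reply);   /* read the header only */
    aio_co_wake(s->recv_coroutine[i]);      /* request coroutine reads body */
    qemu_coroutine_yield();                 /* ... and wakes us when done */
}

/* nbd_coroutine_end, run once per completed request: */
if (s->read_reply_co) {
    aio_co_wake(s->read_reply_co);          /* kick the next header read */
}
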
10 | |||
11 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
13 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
14 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
15 | Message-id: 20170213135235.12274-8-pbonzini@redhat.com | ||
16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
17 | --- | ||
18 | block/nbd-client.h | 2 +- | ||
19 | block/nbd-client.c | 117 ++++++++++++++++++++++++----------------------------- | ||
20 | nbd/client.c | 2 +- | ||
21 | nbd/common.c | 9 +---- | ||
22 | nbd/server.c | 94 +++++++++++++----------------------------- | ||
23 | 5 files changed, 83 insertions(+), 141 deletions(-) | ||
24 | |||
25 | diff --git a/block/nbd-client.h b/block/nbd-client.h | ||
26 | index XXXXXXX..XXXXXXX 100644 | ||
27 | --- a/block/nbd-client.h | ||
28 | +++ b/block/nbd-client.h | ||
29 | @@ -XXX,XX +XXX,XX @@ typedef struct NBDClientSession { | ||
30 | |||
31 | CoMutex send_mutex; | ||
32 | CoQueue free_sema; | ||
33 | - Coroutine *send_coroutine; | ||
34 | + Coroutine *read_reply_co; | ||
35 | int in_flight; | ||
36 | |||
37 | Coroutine *recv_coroutine[MAX_NBD_REQUESTS]; | ||
38 | diff --git a/block/nbd-client.c b/block/nbd-client.c | ||
39 | index XXXXXXX..XXXXXXX 100644 | ||
40 | --- a/block/nbd-client.c | ||
41 | +++ b/block/nbd-client.c | ||
42 | @@ -XXX,XX +XXX,XX @@ | ||
43 | #define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs)) | ||
44 | #define INDEX_TO_HANDLE(bs, index) ((index) ^ ((uint64_t)(intptr_t)bs)) | ||
45 | |||
46 | -static void nbd_recv_coroutines_enter_all(NBDClientSession *s) | ||
47 | +static void nbd_recv_coroutines_enter_all(BlockDriverState *bs) | ||
48 | { | ||
49 | + NBDClientSession *s = nbd_get_client_session(bs); | ||
50 | int i; | ||
51 | |||
52 | for (i = 0; i < MAX_NBD_REQUESTS; i++) { | ||
53 | @@ -XXX,XX +XXX,XX @@ static void nbd_recv_coroutines_enter_all(NBDClientSession *s) | ||
54 | qemu_coroutine_enter(s->recv_coroutine[i]); | ||
55 | } | ||
56 | } | ||
57 | + BDRV_POLL_WHILE(bs, s->read_reply_co); | ||
58 | } | ||
59 | |||
60 | static void nbd_teardown_connection(BlockDriverState *bs) | ||
61 | @@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs) | ||
62 | qio_channel_shutdown(client->ioc, | ||
63 | QIO_CHANNEL_SHUTDOWN_BOTH, | ||
64 | NULL); | ||
65 | - nbd_recv_coroutines_enter_all(client); | ||
66 | + nbd_recv_coroutines_enter_all(bs); | ||
67 | |||
68 | nbd_client_detach_aio_context(bs); | ||
69 | object_unref(OBJECT(client->sioc)); | ||
70 | @@ -XXX,XX +XXX,XX @@ static void nbd_teardown_connection(BlockDriverState *bs) | ||
71 | client->ioc = NULL; | ||
72 | } | ||
73 | |||
74 | -static void nbd_reply_ready(void *opaque) | ||
75 | +static coroutine_fn void nbd_read_reply_entry(void *opaque) | ||
76 | { | ||
77 | - BlockDriverState *bs = opaque; | ||
78 | - NBDClientSession *s = nbd_get_client_session(bs); | ||
79 | + NBDClientSession *s = opaque; | ||
80 | uint64_t i; | ||
81 | int ret; | ||
82 | |||
83 | - if (!s->ioc) { /* Already closed */ | ||
84 | - return; | ||
85 | - } | ||
86 | - | ||
87 | - if (s->reply.handle == 0) { | ||
88 | - /* No reply already in flight. Fetch a header. It is possible | ||
89 | - * that another thread has done the same thing in parallel, so | ||
90 | - * the socket is not readable anymore. | ||
91 | - */ | ||
92 | + for (;;) { | ||
93 | + assert(s->reply.handle == 0); | ||
94 | ret = nbd_receive_reply(s->ioc, &s->reply); | ||
95 | - if (ret == -EAGAIN) { | ||
96 | - return; | ||
97 | - } | ||
98 | if (ret < 0) { | ||
99 | - s->reply.handle = 0; | ||
100 | - goto fail; | ||
101 | + break; | ||
102 | } | ||
103 | - } | ||
104 | |||
105 | - /* There's no need for a mutex on the receive side, because the | ||
106 | - * handler acts as a synchronization point and ensures that only | ||
107 | - * one coroutine is called until the reply finishes. */ | ||
108 | - i = HANDLE_TO_INDEX(s, s->reply.handle); | ||
109 | - if (i >= MAX_NBD_REQUESTS) { | ||
110 | - goto fail; | ||
111 | - } | ||
112 | + /* There's no need for a mutex on the receive side, because the | ||
113 | + * handler acts as a synchronization point and ensures that only | ||
114 | + * one coroutine is called until the reply finishes. | ||
115 | + */ | ||
116 | + i = HANDLE_TO_INDEX(s, s->reply.handle); | ||
117 | + if (i >= MAX_NBD_REQUESTS || !s->recv_coroutine[i]) { | ||
118 | + break; | ||
119 | + } | ||
120 | |||
121 | - if (s->recv_coroutine[i]) { | ||
122 | - qemu_coroutine_enter(s->recv_coroutine[i]); | ||
123 | - return; | ||
124 | + /* We're woken up by the recv_coroutine itself. Note that there | ||
125 | + * is no race between yielding and reentering read_reply_co. This | ||
126 | + * is because: | ||
127 | + * | ||
128 | + * - if recv_coroutine[i] runs on the same AioContext, it is only | ||
129 | + * entered after we yield | ||
130 | + * | ||
131 | + * - if recv_coroutine[i] runs on a different AioContext, reentering | ||
132 | + * read_reply_co happens through a bottom half, which can only | ||
133 | + * run after we yield. | ||
134 | + */ | ||
135 | + aio_co_wake(s->recv_coroutine[i]); | ||
136 | + qemu_coroutine_yield(); | ||
137 | } | ||
138 | - | ||
139 | -fail: | ||
140 | - nbd_teardown_connection(bs); | ||
141 | -} | ||
142 | - | ||
143 | -static void nbd_restart_write(void *opaque) | ||
144 | -{ | ||
145 | - BlockDriverState *bs = opaque; | ||
146 | - | ||
147 | - qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine); | ||
148 | + s->read_reply_co = NULL; | ||
149 | } | ||
150 | |||
151 | static int nbd_co_send_request(BlockDriverState *bs, | ||
152 | @@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs, | ||
153 | QEMUIOVector *qiov) | ||
154 | { | ||
155 | NBDClientSession *s = nbd_get_client_session(bs); | ||
156 | - AioContext *aio_context; | ||
157 | int rc, ret, i; | ||
158 | |||
159 | qemu_co_mutex_lock(&s->send_mutex); | ||
160 | @@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs, | ||
161 | return -EPIPE; | ||
162 | } | ||
163 | |||
164 | - s->send_coroutine = qemu_coroutine_self(); | ||
165 | - aio_context = bdrv_get_aio_context(bs); | ||
166 | - | ||
167 | - aio_set_fd_handler(aio_context, s->sioc->fd, false, | ||
168 | - nbd_reply_ready, nbd_restart_write, NULL, bs); | ||
169 | if (qiov) { | ||
170 | qio_channel_set_cork(s->ioc, true); | ||
171 | rc = nbd_send_request(s->ioc, request); | ||
172 | @@ -XXX,XX +XXX,XX @@ static int nbd_co_send_request(BlockDriverState *bs, | ||
173 | } else { | ||
174 | rc = nbd_send_request(s->ioc, request); | ||
175 | } | ||
176 | - aio_set_fd_handler(aio_context, s->sioc->fd, false, | ||
177 | - nbd_reply_ready, NULL, NULL, bs); | ||
178 | - s->send_coroutine = NULL; | ||
179 | qemu_co_mutex_unlock(&s->send_mutex); | ||
180 | return rc; | ||
181 | } | ||
182 | @@ -XXX,XX +XXX,XX @@ static void nbd_co_receive_reply(NBDClientSession *s, | ||
183 | { | ||
184 | int ret; | ||
185 | |||
186 | - /* Wait until we're woken up by the read handler. TODO: perhaps | ||
187 | - * peek at the next reply and avoid yielding if it's ours? */ | ||
188 | + /* Wait until we're woken up by nbd_read_reply_entry. */ | ||
189 | qemu_coroutine_yield(); | ||
190 | *reply = s->reply; | ||
191 | if (reply->handle != request->handle || | ||
192 | @@ -XXX,XX +XXX,XX @@ static void nbd_coroutine_start(NBDClientSession *s, | ||
193 | /* s->recv_coroutine[i] is set as soon as we get the send_lock. */ | ||
194 | } | ||
195 | |||
196 | -static void nbd_coroutine_end(NBDClientSession *s, | ||
197 | +static void nbd_coroutine_end(BlockDriverState *bs, | ||
198 | NBDRequest *request) | ||
199 | { | ||
200 | + NBDClientSession *s = nbd_get_client_session(bs); | ||
201 | int i = HANDLE_TO_INDEX(s, request->handle); | ||
202 | + | ||
203 | s->recv_coroutine[i] = NULL; | ||
204 | - if (s->in_flight-- == MAX_NBD_REQUESTS) { | ||
205 | - qemu_co_queue_next(&s->free_sema); | ||
206 | + s->in_flight--; | ||
207 | + qemu_co_queue_next(&s->free_sema); | ||
208 | + | ||
209 | + /* Kick the read_reply_co to get the next reply. */ | ||
210 | + if (s->read_reply_co) { | ||
211 | + aio_co_wake(s->read_reply_co); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | @@ -XXX,XX +XXX,XX @@ int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, | ||
216 | } else { | ||
217 | nbd_co_receive_reply(client, &request, &reply, qiov); | ||
218 | } | ||
219 | - nbd_coroutine_end(client, &request); | ||
220 | + nbd_coroutine_end(bs, &request); | ||
221 | return -reply.error; | ||
222 | } | ||
223 | |||
224 | @@ -XXX,XX +XXX,XX @@ int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, | ||
225 | } else { | ||
226 | nbd_co_receive_reply(client, &request, &reply, NULL); | ||
227 | } | ||
228 | - nbd_coroutine_end(client, &request); | ||
229 | + nbd_coroutine_end(bs, &request); | ||
230 | return -reply.error; | ||
231 | } | ||
232 | |||
233 | @@ -XXX,XX +XXX,XX @@ int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, | ||
234 | } else { | ||
235 | nbd_co_receive_reply(client, &request, &reply, NULL); | ||
236 | } | ||
237 | - nbd_coroutine_end(client, &request); | ||
238 | + nbd_coroutine_end(bs, &request); | ||
239 | return -reply.error; | ||
240 | } | ||
241 | |||
242 | @@ -XXX,XX +XXX,XX @@ int nbd_client_co_flush(BlockDriverState *bs) | ||
243 | } else { | ||
244 | nbd_co_receive_reply(client, &request, &reply, NULL); | ||
245 | } | ||
246 | - nbd_coroutine_end(client, &request); | ||
247 | + nbd_coroutine_end(bs, &request); | ||
248 | return -reply.error; | ||
249 | } | ||
250 | |||
251 | @@ -XXX,XX +XXX,XX @@ int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) | ||
252 | } else { | ||
253 | nbd_co_receive_reply(client, &request, &reply, NULL); | ||
254 | } | ||
255 | - nbd_coroutine_end(client, &request); | ||
256 | + nbd_coroutine_end(bs, &request); | ||
257 | return -reply.error; | ||
258 | |||
259 | } | ||
260 | |||
261 | void nbd_client_detach_aio_context(BlockDriverState *bs) | ||
262 | { | ||
263 | - aio_set_fd_handler(bdrv_get_aio_context(bs), | ||
264 | - nbd_get_client_session(bs)->sioc->fd, | ||
265 | - false, NULL, NULL, NULL, NULL); | ||
266 | + NBDClientSession *client = nbd_get_client_session(bs); | ||
267 | + qio_channel_detach_aio_context(QIO_CHANNEL(client->sioc)); | ||
268 | } | ||
269 | |||
270 | void nbd_client_attach_aio_context(BlockDriverState *bs, | ||
271 | AioContext *new_context) | ||
272 | { | ||
273 | - aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sioc->fd, | ||
274 | - false, nbd_reply_ready, NULL, NULL, bs); | ||
275 | + NBDClientSession *client = nbd_get_client_session(bs); | ||
276 | + qio_channel_attach_aio_context(QIO_CHANNEL(client->sioc), new_context); | ||
277 | + aio_co_schedule(new_context, client->read_reply_co); | ||
278 | } | ||
279 | |||
280 | void nbd_client_close(BlockDriverState *bs) | ||
281 | @@ -XXX,XX +XXX,XX @@ int nbd_client_init(BlockDriverState *bs, | ||
282 | /* Now that we're connected, set the socket to be non-blocking and | ||
283 | * kick the reply mechanism. */ | ||
284 | qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL); | ||
285 | - | ||
286 | + client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client); | ||
287 | nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs)); | ||
288 | |||
289 | logout("Established connection with NBD server\n"); | ||
290 | diff --git a/nbd/client.c b/nbd/client.c | ||
291 | index XXXXXXX..XXXXXXX 100644 | ||
292 | --- a/nbd/client.c | ||
293 | +++ b/nbd/client.c | ||
294 | @@ -XXX,XX +XXX,XX @@ ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply) | ||
295 | ssize_t ret; | ||
296 | |||
297 | ret = read_sync(ioc, buf, sizeof(buf)); | ||
298 | - if (ret < 0) { | ||
299 | + if (ret <= 0) { | ||
300 | return ret; | ||
301 | } | ||
302 | |||
303 | diff --git a/nbd/common.c b/nbd/common.c | ||
304 | index XXXXXXX..XXXXXXX 100644 | ||
305 | --- a/nbd/common.c | ||
306 | +++ b/nbd/common.c | ||
307 | @@ -XXX,XX +XXX,XX @@ ssize_t nbd_wr_syncv(QIOChannel *ioc, | ||
308 | } | ||
309 | if (len == QIO_CHANNEL_ERR_BLOCK) { | ||
310 | if (qemu_in_coroutine()) { | ||
311 | - /* XXX figure out if we can create a variant on | ||
312 | - * qio_channel_yield() that works with AIO contexts | ||
313 | - * and consider using that in this branch */ | ||
314 | - qemu_coroutine_yield(); | ||
315 | - } else if (done) { | ||
316 | - /* XXX this is needed by nbd_reply_ready. */ | ||
317 | - qio_channel_wait(ioc, | ||
318 | - do_read ? G_IO_IN : G_IO_OUT); | ||
319 | + qio_channel_yield(ioc, do_read ? G_IO_IN : G_IO_OUT); | ||
320 | } else { | ||
321 | return -EAGAIN; | ||
322 | } | ||
323 | diff --git a/nbd/server.c b/nbd/server.c | ||
324 | index XXXXXXX..XXXXXXX 100644 | ||
325 | --- a/nbd/server.c | ||
326 | +++ b/nbd/server.c | ||
327 | @@ -XXX,XX +XXX,XX @@ struct NBDClient { | ||
328 | CoMutex send_lock; | ||
329 | Coroutine *send_coroutine; | ||
330 | |||
331 | - bool can_read; | ||
332 | - | ||
333 | QTAILQ_ENTRY(NBDClient) next; | ||
334 | int nb_requests; | ||
335 | bool closing; | ||
336 | @@ -XXX,XX +XXX,XX @@ struct NBDClient { | ||
337 | |||
338 | /* That's all folks */ | ||
339 | |||
340 | -static void nbd_set_handlers(NBDClient *client); | ||
341 | -static void nbd_unset_handlers(NBDClient *client); | ||
342 | -static void nbd_update_can_read(NBDClient *client); | ||
343 | +static void nbd_client_receive_next_request(NBDClient *client); | ||
344 | |||
345 | static gboolean nbd_negotiate_continue(QIOChannel *ioc, | ||
346 | GIOCondition condition, | ||
347 | @@ -XXX,XX +XXX,XX @@ void nbd_client_put(NBDClient *client) | ||
348 | */ | ||
349 | assert(client->closing); | ||
350 | |||
351 | - nbd_unset_handlers(client); | ||
352 | + qio_channel_detach_aio_context(client->ioc); | ||
353 | object_unref(OBJECT(client->sioc)); | ||
354 | object_unref(OBJECT(client->ioc)); | ||
355 | if (client->tlscreds) { | ||
356 | @@ -XXX,XX +XXX,XX @@ static NBDRequestData *nbd_request_get(NBDClient *client) | ||
357 | |||
358 | assert(client->nb_requests <= MAX_NBD_REQUESTS - 1); | ||
359 | client->nb_requests++; | ||
360 | - nbd_update_can_read(client); | ||
361 | |||
362 | req = g_new0(NBDRequestData, 1); | ||
363 | nbd_client_get(client); | ||
364 | @@ -XXX,XX +XXX,XX @@ static void nbd_request_put(NBDRequestData *req) | ||
365 | g_free(req); | ||
366 | |||
367 | client->nb_requests--; | ||
368 | - nbd_update_can_read(client); | ||
369 | + nbd_client_receive_next_request(client); | ||
370 | + | ||
371 | nbd_client_put(client); | ||
372 | } | ||
373 | |||
374 | @@ -XXX,XX +XXX,XX @@ static void blk_aio_attached(AioContext *ctx, void *opaque) | ||
375 | exp->ctx = ctx; | ||
376 | |||
377 | QTAILQ_FOREACH(client, &exp->clients, next) { | ||
378 | - nbd_set_handlers(client); | ||
379 | + qio_channel_attach_aio_context(client->ioc, ctx); | ||
380 | + if (client->recv_coroutine) { | ||
381 | + aio_co_schedule(ctx, client->recv_coroutine); | ||
382 | + } | ||
383 | + if (client->send_coroutine) { | ||
384 | + aio_co_schedule(ctx, client->send_coroutine); | ||
385 | + } | ||
386 | } | ||
387 | } | ||
388 | |||
389 | @@ -XXX,XX +XXX,XX @@ static void blk_aio_detach(void *opaque) | ||
390 | TRACE("Export %s: Detaching clients from AIO context %p\n", exp->name, exp->ctx); | ||
391 | |||
392 | QTAILQ_FOREACH(client, &exp->clients, next) { | ||
393 | - nbd_unset_handlers(client); | ||
394 | + qio_channel_detach_aio_context(client->ioc); | ||
395 | } | ||
396 | |||
397 | exp->ctx = NULL; | ||
398 | @@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_send_reply(NBDRequestData *req, NBDReply *reply, | ||
399 | g_assert(qemu_in_coroutine()); | ||
400 | qemu_co_mutex_lock(&client->send_lock); | ||
401 | client->send_coroutine = qemu_coroutine_self(); | ||
402 | - nbd_set_handlers(client); | ||
403 | |||
404 | if (!len) { | ||
405 | rc = nbd_send_reply(client->ioc, reply); | ||
406 | @@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_send_reply(NBDRequestData *req, NBDReply *reply, | ||
407 | } | ||
408 | |||
409 | client->send_coroutine = NULL; | ||
410 | - nbd_set_handlers(client); | ||
411 | qemu_co_mutex_unlock(&client->send_lock); | ||
412 | return rc; | ||
413 | } | ||
414 | @@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_receive_request(NBDRequestData *req, | ||
415 | ssize_t rc; | ||
416 | |||
417 | g_assert(qemu_in_coroutine()); | ||
418 | - client->recv_coroutine = qemu_coroutine_self(); | ||
419 | - nbd_update_can_read(client); | ||
420 | - | ||
421 | + assert(client->recv_coroutine == qemu_coroutine_self()); | ||
422 | rc = nbd_receive_request(client->ioc, request); | ||
423 | if (rc < 0) { | ||
424 | if (rc != -EAGAIN) { | ||
425 | @@ -XXX,XX +XXX,XX @@ static ssize_t nbd_co_receive_request(NBDRequestData *req, | ||
426 | |||
427 | out: | ||
428 | client->recv_coroutine = NULL; | ||
429 | - nbd_update_can_read(client); | ||
430 | + nbd_client_receive_next_request(client); | ||
431 | |||
432 | return rc; | ||
433 | } | ||
434 | |||
435 | -static void nbd_trip(void *opaque) | ||
436 | +/* Owns a reference to the NBDClient passed as opaque. */ | ||
437 | +static coroutine_fn void nbd_trip(void *opaque) | ||
438 | { | ||
439 | NBDClient *client = opaque; | ||
440 | NBDExport *exp = client->exp; | ||
441 | NBDRequestData *req; | ||
442 | - NBDRequest request; | ||
443 | + NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */ | ||
444 | NBDReply reply; | ||
445 | ssize_t ret; | ||
446 | int flags; | ||
447 | |||
448 | TRACE("Reading request."); | ||
449 | if (client->closing) { | ||
450 | + nbd_client_put(client); | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | @@ -XXX,XX +XXX,XX @@ static void nbd_trip(void *opaque) | ||
455 | |||
456 | done: | ||
457 | nbd_request_put(req); | ||
458 | + nbd_client_put(client); | ||
459 | return; | ||
460 | |||
461 | out: | ||
462 | nbd_request_put(req); | ||
463 | client_close(client); | ||
464 | + nbd_client_put(client); | ||
465 | } | ||
466 | |||
467 | -static void nbd_read(void *opaque) | ||
468 | +static void nbd_client_receive_next_request(NBDClient *client) | ||
469 | { | ||
470 | - NBDClient *client = opaque; | ||
471 | - | ||
472 | - if (client->recv_coroutine) { | ||
473 | - qemu_coroutine_enter(client->recv_coroutine); | ||
474 | - } else { | ||
475 | - qemu_coroutine_enter(qemu_coroutine_create(nbd_trip, client)); | ||
476 | - } | ||
477 | -} | ||
478 | - | ||
479 | -static void nbd_restart_write(void *opaque) | ||
480 | -{ | ||
481 | - NBDClient *client = opaque; | ||
482 | - | ||
483 | - qemu_coroutine_enter(client->send_coroutine); | ||
484 | -} | ||
485 | - | ||
486 | -static void nbd_set_handlers(NBDClient *client) | ||
487 | -{ | ||
488 | - if (client->exp && client->exp->ctx) { | ||
489 | - aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true, | ||
490 | - client->can_read ? nbd_read : NULL, | ||
491 | - client->send_coroutine ? nbd_restart_write : NULL, | ||
492 | - NULL, client); | ||
493 | - } | ||
494 | -} | ||
495 | - | ||
496 | -static void nbd_unset_handlers(NBDClient *client) | ||
497 | -{ | ||
498 | - if (client->exp && client->exp->ctx) { | ||
499 | - aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true, NULL, | ||
500 | - NULL, NULL, NULL); | ||
501 | - } | ||
502 | -} | ||
503 | - | ||
504 | -static void nbd_update_can_read(NBDClient *client) | ||
505 | -{ | ||
506 | - bool can_read = client->recv_coroutine || | ||
507 | - client->nb_requests < MAX_NBD_REQUESTS; | ||
508 | - | ||
509 | - if (can_read != client->can_read) { | ||
510 | - client->can_read = can_read; | ||
511 | - nbd_set_handlers(client); | ||
512 | - | ||
513 | - /* There is no need to invoke aio_notify(), since aio_set_fd_handler() | ||
514 | - * in nbd_set_handlers() will have taken care of that */ | ||
515 | + if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS) { | ||
516 | + nbd_client_get(client); | ||
517 | + client->recv_coroutine = qemu_coroutine_create(nbd_trip, client); | ||
518 | + aio_co_schedule(client->exp->ctx, client->recv_coroutine); | ||
519 | } | ||
520 | } | ||
521 | |||
522 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn void nbd_co_client_start(void *opaque) | ||
523 | goto out; | ||
524 | } | ||
525 | qemu_co_mutex_init(&client->send_lock); | ||
526 | - nbd_set_handlers(client); | ||
527 | |||
528 | if (exp) { | ||
529 | QTAILQ_INSERT_TAIL(&exp->clients, client, next); | ||
530 | } | ||
531 | + | ||
532 | + nbd_client_receive_next_request(client); | ||
533 | + | ||
534 | out: | ||
535 | g_free(data); | ||
536 | } | ||
537 | @@ -XXX,XX +XXX,XX @@ void nbd_client_new(NBDExport *exp, | ||
538 | object_ref(OBJECT(client->sioc)); | ||
539 | client->ioc = QIO_CHANNEL(sioc); | ||
540 | object_ref(OBJECT(client->ioc)); | ||
541 | - client->can_read = true; | ||
542 | client->close = close_fn; | ||
543 | |||
544 | data->client = client; | ||
545 | -- | ||
546 | 2.9.3 | ||
547 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | As a small step towards the introduction of multiqueue, we want | ||
4 | coroutines to remain on the same AioContext that started them, | ||
5 | unless they are moved explicitly with e.g. aio_co_schedule. This patch | ||
6 | prevents coroutines from switching AioContext when they use a CoMutex. | ||
7 | For now it does not make much of a difference, because the CoMutex | ||
8 | is not thread-safe and the AioContext itself is used to protect the | ||
9 | CoMutex from concurrent access. However, this is going to change. | ||
10 | |||
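For reference, the wakeup path this relies on can be sketched as follows. This is an illustration only, not code from the series; co->ctx stands for the AioContext recorded when the coroutine last ran, and the real logic lives in aio_co_wake():

    /* Sketch, assuming QEMU's aio/coroutine headers: wake a coroutine
     * on the AioContext it was running on. */
    static void co_wake_sketch(Coroutine *co)
    {
        AioContext *ctx = co->ctx;   /* context the coroutine ran on */

        if (ctx == qemu_get_current_aio_context()) {
            /* Same context: safe to enter it directly. */
            qemu_coroutine_enter(co);
        } else {
            /* Different context: hand off through a bottom half so the
             * coroutine resumes in its own event loop and never
             * migrates AioContexts. */
            aio_co_schedule(ctx, co);
        }
    }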
11 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
13 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
14 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
15 | Message-id: 20170213135235.12274-9-pbonzini@redhat.com | ||
16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
17 | --- | ||
18 | util/qemu-coroutine-lock.c | 5 ++--- | ||
19 | util/trace-events | 1 - | ||
20 | 2 files changed, 2 insertions(+), 4 deletions(-) | ||
21 | |||
22 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/util/qemu-coroutine-lock.c | ||
25 | +++ b/util/qemu-coroutine-lock.c | ||
26 | @@ -XXX,XX +XXX,XX @@ | ||
27 | #include "qemu/coroutine.h" | ||
28 | #include "qemu/coroutine_int.h" | ||
29 | #include "qemu/queue.h" | ||
30 | +#include "block/aio.h" | ||
31 | #include "trace.h" | ||
32 | |||
33 | void qemu_co_queue_init(CoQueue *queue) | ||
34 | @@ -XXX,XX +XXX,XX @@ void qemu_co_queue_run_restart(Coroutine *co) | ||
35 | |||
36 | static bool qemu_co_queue_do_restart(CoQueue *queue, bool single) | ||
37 | { | ||
38 | - Coroutine *self = qemu_coroutine_self(); | ||
39 | Coroutine *next; | ||
40 | |||
41 | if (QSIMPLEQ_EMPTY(&queue->entries)) { | ||
42 | @@ -XXX,XX +XXX,XX @@ static bool qemu_co_queue_do_restart(CoQueue *queue, bool single) | ||
43 | |||
44 | while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) { | ||
45 | QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next); | ||
46 | - QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, next, co_queue_next); | ||
47 | - trace_qemu_co_queue_next(next); | ||
48 | + aio_co_wake(next); | ||
49 | if (single) { | ||
50 | break; | ||
51 | } | ||
52 | diff --git a/util/trace-events b/util/trace-events | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/util/trace-events | ||
55 | +++ b/util/trace-events | ||
56 | @@ -XXX,XX +XXX,XX @@ qemu_coroutine_terminate(void *co) "self %p" | ||
57 | |||
58 | # util/qemu-coroutine-lock.c | ||
59 | qemu_co_queue_run_restart(void *co) "co %p" | ||
60 | -qemu_co_queue_next(void *nxt) "next %p" | ||
61 | qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p" | ||
62 | qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p" | ||
63 | qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p" | ||
64 | -- | ||
65 | 2.9.3 | ||
66 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Keep the coroutine on the same AioContext. Without this change, | ||
4 | there would be a race between yielding the coroutine and reentering it. | ||
5 | The race cannot happen now, because the code only runs from a single | ||
6 | AioContext, but this will change with multiqueue support in the block layer. | ||
7 | |||
8 | While making this change, replace the custom bottom half with aio_co_schedule. | ||
9 | |||
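The resulting idiom, shown in isolation (defer_to_event_loop() is a hypothetical helper, but the two calls mirror the ones inject_error() now makes):

    /* Park the running coroutine and resume it from the event loop of
     * its current AioContext, with no hand-written bottom half. */
    static coroutine_fn void defer_to_event_loop(void)
    {
        aio_co_schedule(qemu_get_current_aio_context(),
                        qemu_coroutine_self());
        qemu_coroutine_yield();
        /* Resumed here by the event loop, still on the same AioContext. */
    }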
10 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
12 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
14 | Message-id: 20170213135235.12274-10-pbonzini@redhat.com | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
16 | --- | ||
17 | block/blkdebug.c | 9 +-------- | ||
18 | 1 file changed, 1 insertion(+), 8 deletions(-) | ||
19 | |||
20 | diff --git a/block/blkdebug.c b/block/blkdebug.c | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/block/blkdebug.c | ||
23 | +++ b/block/blkdebug.c | ||
24 | @@ -XXX,XX +XXX,XX @@ out: | ||
25 | return ret; | ||
26 | } | ||
27 | |||
28 | -static void error_callback_bh(void *opaque) | ||
29 | -{ | ||
30 | - Coroutine *co = opaque; | ||
31 | - qemu_coroutine_enter(co); | ||
32 | -} | ||
33 | - | ||
34 | static int inject_error(BlockDriverState *bs, BlkdebugRule *rule) | ||
35 | { | ||
36 | BDRVBlkdebugState *s = bs->opaque; | ||
37 | @@ -XXX,XX +XXX,XX @@ static int inject_error(BlockDriverState *bs, BlkdebugRule *rule) | ||
38 | } | ||
39 | |||
40 | if (!immediately) { | ||
41 | - aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh, | ||
42 | - qemu_coroutine_self()); | ||
43 | + aio_co_schedule(qemu_get_current_aio_context(), qemu_coroutine_self()); | ||
44 | qemu_coroutine_yield(); | ||
45 | } | ||
46 | |||
47 | -- | ||
48 | 2.9.3 | ||
49 | |||
1 | From: Jeuk Kim <jeuk20.kim@samsung.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | This commit makes the UFS device support query | 3 | qed_aio_start_io and qed_aio_next_io will not have to acquire/release |
4 | and NOP OUT transfer requests. | 4 | the AioContext, while qed_aio_next_io_cb will. Split the functionality
5 | and gain a little type-safety in the process. | ||
5 | 6 | ||
6 | The next patch would be support for UFS logical | ||
7 | unit and scsi command transfer request. | ||
8 | |||
9 | Signed-off-by: Jeuk Kim <jeuk20.kim@samsung.com> | ||
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | 7 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | Message-id: ff7a5f0fd26761936a553ffb89d3df0ba62844e9.1693980783.git.jeuk20.kim@gmail.com | 8 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
10 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
11 | Message-id: 20170213135235.12274-11-pbonzini@redhat.com | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
13 | --- | 13 | --- |
14 | hw/ufs/ufs.h | 46 +++ | 14 | block/qed.c | 39 +++++++++++++++++++++++++-------------- |
15 | hw/ufs/ufs.c | 988 +++++++++++++++++++++++++++++++++++++++++++- | 15 | 1 file changed, 25 insertions(+), 14 deletions(-) |
16 | hw/ufs/trace-events | 1 + | ||
17 | 3 files changed, 1033 insertions(+), 2 deletions(-) | ||
18 | 16 | ||
19 | diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h | 17 | diff --git a/block/qed.c b/block/qed.c |
20 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/hw/ufs/ufs.h | 19 | --- a/block/qed.c |
22 | +++ b/hw/ufs/ufs.h | 20 | +++ b/block/qed.c |
23 | @@ -XXX,XX +XXX,XX @@ | 21 | @@ -XXX,XX +XXX,XX @@ static CachedL2Table *qed_new_l2_table(BDRVQEDState *s) |
24 | #define UFS_MAX_LUS 32 | 22 | return l2_table; |
25 | #define UFS_BLOCK_SIZE 4096 | 23 | } |
26 | 24 | ||
27 | +typedef enum UfsRequestState { | 25 | -static void qed_aio_next_io(void *opaque, int ret); |
28 | + UFS_REQUEST_IDLE = 0, | 26 | +static void qed_aio_next_io(QEDAIOCB *acb, int ret); |
29 | + UFS_REQUEST_READY = 1, | ||
30 | + UFS_REQUEST_RUNNING = 2, | ||
31 | + UFS_REQUEST_COMPLETE = 3, | ||
32 | + UFS_REQUEST_ERROR = 4, | ||
33 | +} UfsRequestState; | ||
34 | + | 27 | + |
35 | +typedef enum UfsReqResult { | 28 | +static void qed_aio_start_io(QEDAIOCB *acb) |
36 | + UFS_REQUEST_SUCCESS = 0, | ||
37 | + UFS_REQUEST_FAIL = 1, | ||
38 | +} UfsReqResult; | ||
39 | + | ||
40 | +typedef struct UfsRequest { | ||
41 | + struct UfsHc *hc; | ||
42 | + UfsRequestState state; | ||
43 | + int slot; | ||
44 | + | ||
45 | + UtpTransferReqDesc utrd; | ||
46 | + UtpUpiuReq req_upiu; | ||
47 | + UtpUpiuRsp rsp_upiu; | ||
48 | + | ||
49 | + /* for scsi command */ | ||
50 | + QEMUSGList *sg; | ||
51 | +} UfsRequest; | ||
52 | + | ||
53 | typedef struct UfsParams { | ||
54 | char *serial; | ||
55 | uint8_t nutrs; /* Number of UTP Transfer Request Slots */ | ||
56 | @@ -XXX,XX +XXX,XX @@ typedef struct UfsHc { | ||
57 | UfsReg reg; | ||
58 | UfsParams params; | ||
59 | uint32_t reg_size; | ||
60 | + UfsRequest *req_list; | ||
61 | + | ||
62 | + DeviceDescriptor device_desc; | ||
63 | + GeometryDescriptor geometry_desc; | ||
64 | + Attributes attributes; | ||
65 | + Flags flags; | ||
66 | |||
67 | qemu_irq irq; | ||
68 | QEMUBH *doorbell_bh; | ||
69 | @@ -XXX,XX +XXX,XX @@ typedef struct UfsHc { | ||
70 | #define TYPE_UFS "ufs" | ||
71 | #define UFS(obj) OBJECT_CHECK(UfsHc, (obj), TYPE_UFS) | ||
72 | |||
73 | +typedef enum UfsQueryFlagPerm { | ||
74 | + UFS_QUERY_FLAG_NONE = 0x0, | ||
75 | + UFS_QUERY_FLAG_READ = 0x1, | ||
76 | + UFS_QUERY_FLAG_SET = 0x2, | ||
77 | + UFS_QUERY_FLAG_CLEAR = 0x4, | ||
78 | + UFS_QUERY_FLAG_TOGGLE = 0x8, | ||
79 | +} UfsQueryFlagPerm; | ||
80 | + | ||
81 | +typedef enum UfsQueryAttrPerm { | ||
82 | + UFS_QUERY_ATTR_NONE = 0x0, | ||
83 | + UFS_QUERY_ATTR_READ = 0x1, | ||
84 | + UFS_QUERY_ATTR_WRITE = 0x2, | ||
85 | +} UfsQueryAttrPerm; | ||
86 | + | ||
87 | #endif /* HW_UFS_UFS_H */ | ||
88 | diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c | ||
89 | index XXXXXXX..XXXXXXX 100644 | ||
90 | --- a/hw/ufs/ufs.c | ||
91 | +++ b/hw/ufs/ufs.c | ||
92 | @@ -XXX,XX +XXX,XX @@ | ||
93 | #include "ufs.h" | ||
94 | |||
95 | /* The QEMU-UFS device follows spec version 3.1 */ | ||
96 | -#define UFS_SPEC_VER 0x00000310 | ||
97 | +#define UFS_SPEC_VER 0x0310 | ||
98 | #define UFS_MAX_NUTRS 32 | ||
99 | #define UFS_MAX_NUTMRS 8 | ||
100 | |||
101 | +static MemTxResult ufs_addr_read(UfsHc *u, hwaddr addr, void *buf, int size) | ||
102 | +{ | 29 | +{ |
103 | + hwaddr hi = addr + size - 1; | 30 | + qed_aio_next_io(acb, 0); |
104 | + | ||
105 | + if (hi < addr) { | ||
106 | + return MEMTX_DECODE_ERROR; | ||
107 | + } | ||
108 | + | ||
109 | + if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) { | ||
110 | + return MEMTX_DECODE_ERROR; | ||
111 | + } | ||
112 | + | ||
113 | + return pci_dma_read(PCI_DEVICE(u), addr, buf, size); | ||
114 | +} | 31 | +} |
115 | + | 32 | + |
116 | +static MemTxResult ufs_addr_write(UfsHc *u, hwaddr addr, const void *buf, | 33 | +static void qed_aio_next_io_cb(void *opaque, int ret) |
117 | + int size) | ||
118 | +{ | 34 | +{ |
119 | + hwaddr hi = addr + size - 1; | 35 | + QEDAIOCB *acb = opaque; |
120 | + if (hi < addr) { | ||
121 | + return MEMTX_DECODE_ERROR; | ||
122 | + } | ||
123 | + | 36 | + |
124 | + if (!FIELD_EX32(u->reg.cap, CAP, 64AS) && (hi >> 32)) { | 37 | + qed_aio_next_io(acb, ret); |
125 | + return MEMTX_DECODE_ERROR; | ||
126 | + } | ||
127 | + | ||
128 | + return pci_dma_write(PCI_DEVICE(u), addr, buf, size); | ||
129 | +} | 38 | +} |
130 | + | 39 | |
131 | +static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result); | 40 | static void qed_plug_allocating_write_reqs(BDRVQEDState *s) |
132 | + | ||
133 | +static inline hwaddr ufs_get_utrd_addr(UfsHc *u, uint32_t slot) | ||
134 | +{ | ||
135 | + hwaddr utrl_base_addr = (((hwaddr)u->reg.utrlbau) << 32) + u->reg.utrlba; | ||
136 | + hwaddr utrd_addr = utrl_base_addr + slot * sizeof(UtpTransferReqDesc); | ||
137 | + | ||
138 | + return utrd_addr; | ||
139 | +} | ||
140 | + | ||
141 | +static inline hwaddr ufs_get_req_upiu_base_addr(const UtpTransferReqDesc *utrd) | ||
142 | +{ | ||
143 | + uint32_t cmd_desc_base_addr_lo = | ||
144 | + le32_to_cpu(utrd->command_desc_base_addr_lo); | ||
145 | + uint32_t cmd_desc_base_addr_hi = | ||
146 | + le32_to_cpu(utrd->command_desc_base_addr_hi); | ||
147 | + | ||
148 | + return (((hwaddr)cmd_desc_base_addr_hi) << 32) + cmd_desc_base_addr_lo; | ||
149 | +} | ||
150 | + | ||
151 | +static inline hwaddr ufs_get_rsp_upiu_base_addr(const UtpTransferReqDesc *utrd) | ||
152 | +{ | ||
153 | + hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(utrd); | ||
154 | + uint32_t rsp_upiu_byte_off = | ||
155 | + le16_to_cpu(utrd->response_upiu_offset) * sizeof(uint32_t); | ||
156 | + return req_upiu_base_addr + rsp_upiu_byte_off; | ||
157 | +} | ||
158 | + | ||
159 | +static MemTxResult ufs_dma_read_utrd(UfsRequest *req) | ||
160 | +{ | ||
161 | + UfsHc *u = req->hc; | ||
162 | + hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot); | ||
163 | + MemTxResult ret; | ||
164 | + | ||
165 | + ret = ufs_addr_read(u, utrd_addr, &req->utrd, sizeof(req->utrd)); | ||
166 | + if (ret) { | ||
167 | + trace_ufs_err_dma_read_utrd(req->slot, utrd_addr); | ||
168 | + } | ||
169 | + return ret; | ||
170 | +} | ||
171 | + | ||
172 | +static MemTxResult ufs_dma_read_req_upiu(UfsRequest *req) | ||
173 | +{ | ||
174 | + UfsHc *u = req->hc; | ||
175 | + hwaddr req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd); | ||
176 | + UtpUpiuReq *req_upiu = &req->req_upiu; | ||
177 | + uint32_t copy_size; | ||
178 | + uint16_t data_segment_length; | ||
179 | + MemTxResult ret; | ||
180 | + | ||
181 | + /* | ||
182 | + * To know the size of the req_upiu, we need to read the | ||
183 | + * data_segment_length in the header first. | ||
184 | + */ | ||
185 | + ret = ufs_addr_read(u, req_upiu_base_addr, &req_upiu->header, | ||
186 | + sizeof(UtpUpiuHeader)); | ||
187 | + if (ret) { | ||
188 | + trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr); | ||
189 | + return ret; | ||
190 | + } | ||
191 | + data_segment_length = be16_to_cpu(req_upiu->header.data_segment_length); | ||
192 | + | ||
193 | + copy_size = sizeof(UtpUpiuHeader) + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE + | ||
194 | + data_segment_length; | ||
195 | + | ||
196 | + ret = ufs_addr_read(u, req_upiu_base_addr, &req->req_upiu, copy_size); | ||
197 | + if (ret) { | ||
198 | + trace_ufs_err_dma_read_req_upiu(req->slot, req_upiu_base_addr); | ||
199 | + } | ||
200 | + return ret; | ||
201 | +} | ||
202 | + | ||
203 | +static MemTxResult ufs_dma_read_prdt(UfsRequest *req) | ||
204 | +{ | ||
205 | + UfsHc *u = req->hc; | ||
206 | + uint16_t prdt_len = le16_to_cpu(req->utrd.prd_table_length); | ||
207 | + uint16_t prdt_byte_off = | ||
208 | + le16_to_cpu(req->utrd.prd_table_offset) * sizeof(uint32_t); | ||
209 | + uint32_t prdt_size = prdt_len * sizeof(UfshcdSgEntry); | ||
210 | + g_autofree UfshcdSgEntry *prd_entries = NULL; | ||
211 | + hwaddr req_upiu_base_addr, prdt_base_addr; | ||
212 | + int err; | ||
213 | + | ||
214 | + assert(!req->sg); | ||
215 | + | ||
216 | + if (prdt_size == 0) { | ||
217 | + return MEMTX_OK; | ||
218 | + } | ||
219 | + prd_entries = g_new(UfshcdSgEntry, prdt_size); | ||
220 | + | ||
221 | + req_upiu_base_addr = ufs_get_req_upiu_base_addr(&req->utrd); | ||
222 | + prdt_base_addr = req_upiu_base_addr + prdt_byte_off; | ||
223 | + | ||
224 | + err = ufs_addr_read(u, prdt_base_addr, prd_entries, prdt_size); | ||
225 | + if (err) { | ||
226 | + trace_ufs_err_dma_read_prdt(req->slot, prdt_base_addr); | ||
227 | + return err; | ||
228 | + } | ||
229 | + | ||
230 | + req->sg = g_malloc0(sizeof(QEMUSGList)); | ||
231 | + pci_dma_sglist_init(req->sg, PCI_DEVICE(u), prdt_len); | ||
232 | + | ||
233 | + for (uint16_t i = 0; i < prdt_len; ++i) { | ||
234 | + hwaddr data_dma_addr = le64_to_cpu(prd_entries[i].addr); | ||
235 | + uint32_t data_byte_count = le32_to_cpu(prd_entries[i].size) + 1; | ||
236 | + qemu_sglist_add(req->sg, data_dma_addr, data_byte_count); | ||
237 | + } | ||
238 | + return MEMTX_OK; | ||
239 | +} | ||
240 | + | ||
241 | +static MemTxResult ufs_dma_read_upiu(UfsRequest *req) | ||
242 | +{ | ||
243 | + MemTxResult ret; | ||
244 | + | ||
245 | + ret = ufs_dma_read_utrd(req); | ||
246 | + if (ret) { | ||
247 | + return ret; | ||
248 | + } | ||
249 | + | ||
250 | + ret = ufs_dma_read_req_upiu(req); | ||
251 | + if (ret) { | ||
252 | + return ret; | ||
253 | + } | ||
254 | + | ||
255 | + ret = ufs_dma_read_prdt(req); | ||
256 | + if (ret) { | ||
257 | + return ret; | ||
258 | + } | ||
259 | + | ||
260 | + return 0; | ||
261 | +} | ||
262 | + | ||
263 | +static MemTxResult ufs_dma_write_utrd(UfsRequest *req) | ||
264 | +{ | ||
265 | + UfsHc *u = req->hc; | ||
266 | + hwaddr utrd_addr = ufs_get_utrd_addr(u, req->slot); | ||
267 | + MemTxResult ret; | ||
268 | + | ||
269 | + ret = ufs_addr_write(u, utrd_addr, &req->utrd, sizeof(req->utrd)); | ||
270 | + if (ret) { | ||
271 | + trace_ufs_err_dma_write_utrd(req->slot, utrd_addr); | ||
272 | + } | ||
273 | + return ret; | ||
274 | +} | ||
275 | + | ||
276 | +static MemTxResult ufs_dma_write_rsp_upiu(UfsRequest *req) | ||
277 | +{ | ||
278 | + UfsHc *u = req->hc; | ||
279 | + hwaddr rsp_upiu_base_addr = ufs_get_rsp_upiu_base_addr(&req->utrd); | ||
280 | + uint32_t rsp_upiu_byte_len = | ||
281 | + le16_to_cpu(req->utrd.response_upiu_length) * sizeof(uint32_t); | ||
282 | + uint16_t data_segment_length = | ||
283 | + be16_to_cpu(req->rsp_upiu.header.data_segment_length); | ||
284 | + uint32_t copy_size = sizeof(UtpUpiuHeader) + | ||
285 | + UFS_TRANSACTION_SPECIFIC_FIELD_SIZE + | ||
286 | + data_segment_length; | ||
287 | + MemTxResult ret; | ||
288 | + | ||
289 | + if (copy_size > rsp_upiu_byte_len) { | ||
290 | + copy_size = rsp_upiu_byte_len; | ||
291 | + } | ||
292 | + | ||
293 | + ret = ufs_addr_write(u, rsp_upiu_base_addr, &req->rsp_upiu, copy_size); | ||
294 | + if (ret) { | ||
295 | + trace_ufs_err_dma_write_rsp_upiu(req->slot, rsp_upiu_base_addr); | ||
296 | + } | ||
297 | + return ret; | ||
298 | +} | ||
299 | + | ||
300 | +static MemTxResult ufs_dma_write_upiu(UfsRequest *req) | ||
301 | +{ | ||
302 | + MemTxResult ret; | ||
303 | + | ||
304 | + ret = ufs_dma_write_rsp_upiu(req); | ||
305 | + if (ret) { | ||
306 | + return ret; | ||
307 | + } | ||
308 | + | ||
309 | + return ufs_dma_write_utrd(req); | ||
310 | +} | ||
311 | + | ||
312 | static void ufs_irq_check(UfsHc *u) | ||
313 | { | 41 | { |
314 | PCIDevice *pci = PCI_DEVICE(u); | 42 | @@ -XXX,XX +XXX,XX @@ static void qed_unplug_allocating_write_reqs(BDRVQEDState *s) |
315 | @@ -XXX,XX +XXX,XX @@ static void ufs_irq_check(UfsHc *u) | 43 | |
44 | acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs); | ||
45 | if (acb) { | ||
46 | - qed_aio_next_io(acb, 0); | ||
47 | + qed_aio_start_io(acb); | ||
316 | } | 48 | } |
317 | } | 49 | } |
318 | 50 | ||
319 | +static void ufs_process_db(UfsHc *u, uint32_t val) | 51 | @@ -XXX,XX +XXX,XX @@ static void qed_aio_complete(QEDAIOCB *acb, int ret) |
320 | +{ | 52 | QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next); |
321 | + unsigned long doorbell; | 53 | acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs); |
322 | + uint32_t slot; | 54 | if (acb) { |
323 | + uint32_t nutrs = u->params.nutrs; | 55 | - qed_aio_next_io(acb, 0); |
324 | + UfsRequest *req; | 56 | + qed_aio_start_io(acb); |
325 | + | 57 | } else if (s->header.features & QED_F_NEED_CHECK) { |
326 | + val &= ~u->reg.utrldbr; | 58 | qed_start_need_check_timer(s); |
327 | + if (!val) { | 59 | } |
328 | + return; | 60 | @@ -XXX,XX +XXX,XX @@ static void qed_commit_l2_update(void *opaque, int ret) |
329 | + } | 61 | acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset); |
330 | + | 62 | assert(acb->request.l2_table != NULL); |
331 | + doorbell = val; | 63 | |
332 | + slot = find_first_bit(&doorbell, nutrs); | 64 | - qed_aio_next_io(opaque, ret); |
333 | + | 65 | + qed_aio_next_io(acb, ret); |
334 | + while (slot < nutrs) { | ||
335 | + req = &u->req_list[slot]; | ||
336 | + if (req->state == UFS_REQUEST_ERROR) { | ||
337 | + trace_ufs_err_utrl_slot_error(req->slot); | ||
338 | + return; | ||
339 | + } | ||
340 | + | ||
341 | + if (req->state != UFS_REQUEST_IDLE) { | ||
342 | + trace_ufs_err_utrl_slot_busy(req->slot); | ||
343 | + return; | ||
344 | + } | ||
345 | + | ||
346 | + trace_ufs_process_db(slot); | ||
347 | + req->state = UFS_REQUEST_READY; | ||
348 | + slot = find_next_bit(&doorbell, nutrs, slot + 1); | ||
349 | + } | ||
350 | + | ||
351 | + qemu_bh_schedule(u->doorbell_bh); | ||
352 | +} | ||
353 | + | ||
354 | static void ufs_process_uiccmd(UfsHc *u, uint32_t val) | ||
355 | { | ||
356 | trace_ufs_process_uiccmd(val, u->reg.ucmdarg1, u->reg.ucmdarg2, | ||
357 | @@ -XXX,XX +XXX,XX @@ static void ufs_write_reg(UfsHc *u, hwaddr offset, uint32_t data, unsigned size) | ||
358 | u->reg.utrlbau = data; | ||
359 | break; | ||
360 | case A_UTRLDBR: | ||
361 | - /* Not yet supported */ | ||
362 | + ufs_process_db(u, data); | ||
363 | + u->reg.utrldbr |= data; | ||
364 | break; | ||
365 | case A_UTRLRSR: | ||
366 | u->reg.utrlrsr = data; | ||
367 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps ufs_mmio_ops = { | ||
368 | }, | ||
369 | }; | ||
370 | |||
371 | +static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, | ||
372 | + uint8_t flags, uint8_t response, | ||
373 | + uint8_t scsi_status, | ||
374 | + uint16_t data_segment_length) | ||
375 | +{ | ||
376 | + memcpy(&req->rsp_upiu.header, &req->req_upiu.header, sizeof(UtpUpiuHeader)); | ||
377 | + req->rsp_upiu.header.trans_type = trans_type; | ||
378 | + req->rsp_upiu.header.flags = flags; | ||
379 | + req->rsp_upiu.header.response = response; | ||
380 | + req->rsp_upiu.header.scsi_status = scsi_status; | ||
381 | + req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length); | ||
382 | +} | ||
383 | + | ||
384 | +static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req) | ||
385 | +{ | ||
386 | + trace_ufs_exec_nop_cmd(req->slot); | ||
387 | + ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_NOP_IN, 0, 0, 0, 0); | ||
388 | + return UFS_REQUEST_SUCCESS; | ||
389 | +} | ||
390 | + | ||
391 | +/* | ||
392 | + * This defines the permission of flags based on their IDN. There are some | ||
393 | + * things that are declared read-only, which is inconsistent with the ufs spec, | ||
394 | + * because we want to return an error for features that are not yet supported. | ||
395 | + */ | ||
396 | +static const int flag_permission[UFS_QUERY_FLAG_IDN_COUNT] = { | ||
397 | + [UFS_QUERY_FLAG_IDN_FDEVICEINIT] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET, | ||
398 | + /* Write protection is not supported */ | ||
399 | + [UFS_QUERY_FLAG_IDN_PERMANENT_WPE] = UFS_QUERY_FLAG_READ, | ||
400 | + [UFS_QUERY_FLAG_IDN_PWR_ON_WPE] = UFS_QUERY_FLAG_READ, | ||
401 | + [UFS_QUERY_FLAG_IDN_BKOPS_EN] = UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | | ||
402 | + UFS_QUERY_FLAG_CLEAR | | ||
403 | + UFS_QUERY_FLAG_TOGGLE, | ||
404 | + [UFS_QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE] = | ||
405 | + UFS_QUERY_FLAG_READ | UFS_QUERY_FLAG_SET | UFS_QUERY_FLAG_CLEAR | | ||
406 | + UFS_QUERY_FLAG_TOGGLE, | ||
407 | + /* Purge Operation is not supported */ | ||
408 | + [UFS_QUERY_FLAG_IDN_PURGE_ENABLE] = UFS_QUERY_FLAG_NONE, | ||
409 | + /* Refresh Operation is not supported */ | ||
410 | + [UFS_QUERY_FLAG_IDN_REFRESH_ENABLE] = UFS_QUERY_FLAG_NONE, | ||
411 | + /* Physical Resource Removal is not supported */ | ||
412 | + [UFS_QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL] = UFS_QUERY_FLAG_READ, | ||
413 | + [UFS_QUERY_FLAG_IDN_BUSY_RTC] = UFS_QUERY_FLAG_READ, | ||
414 | + [UFS_QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE] = UFS_QUERY_FLAG_READ, | ||
415 | + /* Write Booster is not supported */ | ||
416 | + [UFS_QUERY_FLAG_IDN_WB_EN] = UFS_QUERY_FLAG_READ, | ||
417 | + [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN] = UFS_QUERY_FLAG_READ, | ||
418 | + [UFS_QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8] = UFS_QUERY_FLAG_READ, | ||
419 | +}; | ||
420 | + | ||
421 | +static inline QueryRespCode ufs_flag_check_idn_valid(uint8_t idn, int op) | ||
422 | +{ | ||
423 | + if (idn >= UFS_QUERY_FLAG_IDN_COUNT) { | ||
424 | + return UFS_QUERY_RESULT_INVALID_IDN; | ||
425 | + } | ||
426 | + | ||
427 | + if (!(flag_permission[idn] & op)) { | ||
428 | + if (op == UFS_QUERY_FLAG_READ) { | ||
429 | + trace_ufs_err_query_flag_not_readable(idn); | ||
430 | + return UFS_QUERY_RESULT_NOT_READABLE; | ||
431 | + } | ||
432 | + trace_ufs_err_query_flag_not_writable(idn); | ||
433 | + return UFS_QUERY_RESULT_NOT_WRITEABLE; | ||
434 | + } | ||
435 | + | ||
436 | + return UFS_QUERY_RESULT_SUCCESS; | ||
437 | +} | ||
438 | + | ||
439 | +static const int attr_permission[UFS_QUERY_ATTR_IDN_COUNT] = { | ||
440 | + /* booting is not supported */ | ||
441 | + [UFS_QUERY_ATTR_IDN_BOOT_LU_EN] = UFS_QUERY_ATTR_READ, | ||
442 | + [UFS_QUERY_ATTR_IDN_POWER_MODE] = UFS_QUERY_ATTR_READ, | ||
443 | + [UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL] = | ||
444 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
445 | + [UFS_QUERY_ATTR_IDN_OOO_DATA_EN] = UFS_QUERY_ATTR_READ, | ||
446 | + [UFS_QUERY_ATTR_IDN_BKOPS_STATUS] = UFS_QUERY_ATTR_READ, | ||
447 | + [UFS_QUERY_ATTR_IDN_PURGE_STATUS] = UFS_QUERY_ATTR_READ, | ||
448 | + [UFS_QUERY_ATTR_IDN_MAX_DATA_IN] = | ||
449 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
450 | + [UFS_QUERY_ATTR_IDN_MAX_DATA_OUT] = | ||
451 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
452 | + [UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED] = UFS_QUERY_ATTR_READ, | ||
453 | + [UFS_QUERY_ATTR_IDN_REF_CLK_FREQ] = | ||
454 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
455 | + [UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK] = UFS_QUERY_ATTR_READ, | ||
456 | + [UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT] = | ||
457 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
458 | + [UFS_QUERY_ATTR_IDN_EE_CONTROL] = | ||
459 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
460 | + [UFS_QUERY_ATTR_IDN_EE_STATUS] = UFS_QUERY_ATTR_READ, | ||
461 | + [UFS_QUERY_ATTR_IDN_SECONDS_PASSED] = UFS_QUERY_ATTR_WRITE, | ||
462 | + [UFS_QUERY_ATTR_IDN_CNTX_CONF] = UFS_QUERY_ATTR_READ, | ||
463 | + [UFS_QUERY_ATTR_IDN_FFU_STATUS] = UFS_QUERY_ATTR_READ, | ||
464 | + [UFS_QUERY_ATTR_IDN_PSA_STATE] = UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
465 | + [UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE] = | ||
466 | + UFS_QUERY_ATTR_READ | UFS_QUERY_ATTR_WRITE, | ||
467 | + [UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME] = UFS_QUERY_ATTR_READ, | ||
468 | + [UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP] = UFS_QUERY_ATTR_READ, | ||
469 | + [UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND] = UFS_QUERY_ATTR_READ, | ||
470 | + [UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND] = UFS_QUERY_ATTR_READ, | ||
471 | + [UFS_QUERY_ATTR_IDN_THROTTLING_STATUS] = UFS_QUERY_ATTR_READ, | ||
472 | + [UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS] = UFS_QUERY_ATTR_READ, | ||
473 | + [UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ, | ||
474 | + [UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST] = UFS_QUERY_ATTR_READ, | ||
475 | + [UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE] = UFS_QUERY_ATTR_READ, | ||
476 | + /* refresh operation is not supported */ | ||
477 | + [UFS_QUERY_ATTR_IDN_REFRESH_STATUS] = UFS_QUERY_ATTR_READ, | ||
478 | + [UFS_QUERY_ATTR_IDN_REFRESH_FREQ] = UFS_QUERY_ATTR_READ, | ||
479 | + [UFS_QUERY_ATTR_IDN_REFRESH_UNIT] = UFS_QUERY_ATTR_READ, | ||
480 | +}; | ||
481 | + | ||
482 | +static inline QueryRespCode ufs_attr_check_idn_valid(uint8_t idn, int op) | ||
483 | +{ | ||
484 | + if (idn >= UFS_QUERY_ATTR_IDN_COUNT) { | ||
485 | + return UFS_QUERY_RESULT_INVALID_IDN; | ||
486 | + } | ||
487 | + | ||
488 | + if (!(attr_permission[idn] & op)) { | ||
489 | + if (op == UFS_QUERY_ATTR_READ) { | ||
490 | + trace_ufs_err_query_attr_not_readable(idn); | ||
491 | + return UFS_QUERY_RESULT_NOT_READABLE; | ||
492 | + } | ||
493 | + trace_ufs_err_query_attr_not_writable(idn); | ||
494 | + return UFS_QUERY_RESULT_NOT_WRITEABLE; | ||
495 | + } | ||
496 | + | ||
497 | + return UFS_QUERY_RESULT_SUCCESS; | ||
498 | +} | ||
499 | + | ||
500 | +static QueryRespCode ufs_exec_query_flag(UfsRequest *req, int op) | ||
501 | +{ | ||
502 | + UfsHc *u = req->hc; | ||
503 | + uint8_t idn = req->req_upiu.qr.idn; | ||
504 | + uint32_t value; | ||
505 | + QueryRespCode ret; | ||
506 | + | ||
507 | + ret = ufs_flag_check_idn_valid(idn, op); | ||
508 | + if (ret) { | ||
509 | + return ret; | ||
510 | + } | ||
511 | + | ||
512 | + if (idn == UFS_QUERY_FLAG_IDN_FDEVICEINIT) { | ||
513 | + value = 0; | ||
514 | + } else if (op == UFS_QUERY_FLAG_READ) { | ||
515 | + value = *(((uint8_t *)&u->flags) + idn); | ||
516 | + } else if (op == UFS_QUERY_FLAG_SET) { | ||
517 | + value = 1; | ||
518 | + } else if (op == UFS_QUERY_FLAG_CLEAR) { | ||
519 | + value = 0; | ||
520 | + } else if (op == UFS_QUERY_FLAG_TOGGLE) { | ||
521 | + value = *(((uint8_t *)&u->flags) + idn); | ||
522 | + value = !value; | ||
523 | + } else { | ||
524 | + trace_ufs_err_query_invalid_opcode(op); | ||
525 | + return UFS_QUERY_RESULT_INVALID_OPCODE; | ||
526 | + } | ||
527 | + | ||
528 | + *(((uint8_t *)&u->flags) + idn) = value; | ||
529 | + req->rsp_upiu.qr.value = cpu_to_be32(value); | ||
530 | + return UFS_QUERY_RESULT_SUCCESS; | ||
531 | +} | ||
532 | + | ||
533 | +static uint32_t ufs_read_attr_value(UfsHc *u, uint8_t idn) | ||
534 | +{ | ||
535 | + switch (idn) { | ||
536 | + case UFS_QUERY_ATTR_IDN_BOOT_LU_EN: | ||
537 | + return u->attributes.boot_lun_en; | ||
538 | + case UFS_QUERY_ATTR_IDN_POWER_MODE: | ||
539 | + return u->attributes.current_power_mode; | ||
540 | + case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL: | ||
541 | + return u->attributes.active_icc_level; | ||
542 | + case UFS_QUERY_ATTR_IDN_OOO_DATA_EN: | ||
543 | + return u->attributes.out_of_order_data_en; | ||
544 | + case UFS_QUERY_ATTR_IDN_BKOPS_STATUS: | ||
545 | + return u->attributes.background_op_status; | ||
546 | + case UFS_QUERY_ATTR_IDN_PURGE_STATUS: | ||
547 | + return u->attributes.purge_status; | ||
548 | + case UFS_QUERY_ATTR_IDN_MAX_DATA_IN: | ||
549 | + return u->attributes.max_data_in_size; | ||
550 | + case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT: | ||
551 | + return u->attributes.max_data_out_size; | ||
552 | + case UFS_QUERY_ATTR_IDN_DYN_CAP_NEEDED: | ||
553 | + return be32_to_cpu(u->attributes.dyn_cap_needed); | ||
554 | + case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ: | ||
555 | + return u->attributes.ref_clk_freq; | ||
556 | + case UFS_QUERY_ATTR_IDN_CONF_DESC_LOCK: | ||
557 | + return u->attributes.config_descr_lock; | ||
558 | + case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT: | ||
559 | + return u->attributes.max_num_of_rtt; | ||
560 | + case UFS_QUERY_ATTR_IDN_EE_CONTROL: | ||
561 | + return be16_to_cpu(u->attributes.exception_event_control); | ||
562 | + case UFS_QUERY_ATTR_IDN_EE_STATUS: | ||
563 | + return be16_to_cpu(u->attributes.exception_event_status); | ||
564 | + case UFS_QUERY_ATTR_IDN_SECONDS_PASSED: | ||
565 | + return be32_to_cpu(u->attributes.seconds_passed); | ||
566 | + case UFS_QUERY_ATTR_IDN_CNTX_CONF: | ||
567 | + return be16_to_cpu(u->attributes.context_conf); | ||
568 | + case UFS_QUERY_ATTR_IDN_FFU_STATUS: | ||
569 | + return u->attributes.device_ffu_status; | ||
570 | + case UFS_QUERY_ATTR_IDN_PSA_STATE: | ||
571 | + return be32_to_cpu(u->attributes.psa_state); | ||
572 | + case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE: | ||
573 | + return be32_to_cpu(u->attributes.psa_data_size); | ||
574 | + case UFS_QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME: | ||
575 | + return u->attributes.ref_clk_gating_wait_time; | ||
576 | + case UFS_QUERY_ATTR_IDN_CASE_ROUGH_TEMP: | ||
577 | + return u->attributes.device_case_rough_temperaure; | ||
578 | + case UFS_QUERY_ATTR_IDN_HIGH_TEMP_BOUND: | ||
579 | + return u->attributes.device_too_high_temp_boundary; | ||
580 | + case UFS_QUERY_ATTR_IDN_LOW_TEMP_BOUND: | ||
581 | + return u->attributes.device_too_low_temp_boundary; | ||
582 | + case UFS_QUERY_ATTR_IDN_THROTTLING_STATUS: | ||
583 | + return u->attributes.throttling_status; | ||
584 | + case UFS_QUERY_ATTR_IDN_WB_FLUSH_STATUS: | ||
585 | + return u->attributes.wb_buffer_flush_status; | ||
586 | + case UFS_QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE: | ||
587 | + return u->attributes.available_wb_buffer_size; | ||
588 | + case UFS_QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST: | ||
589 | + return u->attributes.wb_buffer_life_time_est; | ||
590 | + case UFS_QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE: | ||
591 | + return be32_to_cpu(u->attributes.current_wb_buffer_size); | ||
592 | + case UFS_QUERY_ATTR_IDN_REFRESH_STATUS: | ||
593 | + return u->attributes.refresh_status; | ||
594 | + case UFS_QUERY_ATTR_IDN_REFRESH_FREQ: | ||
595 | + return u->attributes.refresh_freq; | ||
596 | + case UFS_QUERY_ATTR_IDN_REFRESH_UNIT: | ||
597 | + return u->attributes.refresh_unit; | ||
598 | + } | ||
599 | + return 0; | ||
600 | +} | ||
601 | + | ||
602 | +static void ufs_write_attr_value(UfsHc *u, uint8_t idn, uint32_t value) | ||
603 | +{ | ||
604 | + switch (idn) { | ||
605 | + case UFS_QUERY_ATTR_IDN_ACTIVE_ICC_LVL: | ||
606 | + u->attributes.active_icc_level = value; | ||
607 | + break; | ||
608 | + case UFS_QUERY_ATTR_IDN_MAX_DATA_IN: | ||
609 | + u->attributes.max_data_in_size = value; | ||
610 | + break; | ||
611 | + case UFS_QUERY_ATTR_IDN_MAX_DATA_OUT: | ||
612 | + u->attributes.max_data_out_size = value; | ||
613 | + break; | ||
614 | + case UFS_QUERY_ATTR_IDN_REF_CLK_FREQ: | ||
615 | + u->attributes.ref_clk_freq = value; | ||
616 | + break; | ||
617 | + case UFS_QUERY_ATTR_IDN_MAX_NUM_OF_RTT: | ||
618 | + u->attributes.max_num_of_rtt = value; | ||
619 | + break; | ||
620 | + case UFS_QUERY_ATTR_IDN_EE_CONTROL: | ||
621 | + u->attributes.exception_event_control = cpu_to_be16(value); | ||
622 | + break; | ||
623 | + case UFS_QUERY_ATTR_IDN_SECONDS_PASSED: | ||
624 | + u->attributes.seconds_passed = cpu_to_be32(value); | ||
625 | + break; | ||
626 | + case UFS_QUERY_ATTR_IDN_PSA_STATE: | ||
627 | + u->attributes.psa_state = value; | ||
628 | + break; | ||
629 | + case UFS_QUERY_ATTR_IDN_PSA_DATA_SIZE: | ||
630 | + u->attributes.psa_data_size = cpu_to_be32(value); | ||
631 | + break; | ||
632 | + } | ||
633 | +} | ||
634 | + | ||
635 | +static QueryRespCode ufs_exec_query_attr(UfsRequest *req, int op) | ||
636 | +{ | ||
637 | + UfsHc *u = req->hc; | ||
638 | + uint8_t idn = req->req_upiu.qr.idn; | ||
639 | + uint32_t value; | ||
640 | + QueryRespCode ret; | ||
641 | + | ||
642 | + ret = ufs_attr_check_idn_valid(idn, op); | ||
643 | + if (ret) { | ||
644 | + return ret; | ||
645 | + } | ||
646 | + | ||
647 | + if (op == UFS_QUERY_ATTR_READ) { | ||
648 | + value = ufs_read_attr_value(u, idn); | ||
649 | + } else { | ||
650 | + value = be32_to_cpu(req->req_upiu.qr.value); | ||
651 | + ufs_write_attr_value(u, idn, value); | ||
652 | + } | ||
653 | + | ||
654 | + req->rsp_upiu.qr.value = cpu_to_be32(value); | ||
655 | + return UFS_QUERY_RESULT_SUCCESS; | ||
656 | +} | ||
657 | + | ||
658 | +static const RpmbUnitDescriptor rpmb_unit_desc = { | ||
659 | + .length = sizeof(RpmbUnitDescriptor), | ||
660 | + .descriptor_idn = 2, | ||
661 | + .unit_index = UFS_UPIU_RPMB_WLUN, | ||
662 | + .lu_enable = 0, | ||
663 | +}; | ||
664 | + | ||
665 | +static QueryRespCode ufs_read_unit_desc(UfsRequest *req) | ||
666 | +{ | ||
667 | + uint8_t lun = req->req_upiu.qr.index; | ||
668 | + | ||
669 | + if (lun != UFS_UPIU_RPMB_WLUN && lun > UFS_MAX_LUS) { | ||
670 | + trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun); | ||
671 | + return UFS_QUERY_RESULT_INVALID_INDEX; | ||
672 | + } | ||
673 | + | ||
674 | + if (lun == UFS_UPIU_RPMB_WLUN) { | ||
675 | + memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length); | ||
676 | + } else { | ||
677 | + /* unit descriptor is not yet supported */ | ||
678 | + return UFS_QUERY_RESULT_INVALID_INDEX; | ||
679 | + } | ||
680 | + | ||
681 | + return UFS_QUERY_RESULT_SUCCESS; | ||
682 | +} | ||
683 | + | ||
684 | +static inline StringDescriptor manufacturer_str_desc(void) | ||
685 | +{ | ||
686 | + StringDescriptor desc = { | ||
687 | + .length = 0x12, | ||
688 | + .descriptor_idn = UFS_QUERY_DESC_IDN_STRING, | ||
689 | + }; | ||
690 | + desc.UC[0] = cpu_to_be16('R'); | ||
691 | + desc.UC[1] = cpu_to_be16('E'); | ||
692 | + desc.UC[2] = cpu_to_be16('D'); | ||
693 | + desc.UC[3] = cpu_to_be16('H'); | ||
694 | + desc.UC[4] = cpu_to_be16('A'); | ||
695 | + desc.UC[5] = cpu_to_be16('T'); | ||
696 | + return desc; | ||
697 | +} | ||
698 | + | ||
699 | +static inline StringDescriptor product_name_str_desc(void) | ||
700 | +{ | ||
701 | + StringDescriptor desc = { | ||
702 | + .length = 0x22, | ||
703 | + .descriptor_idn = UFS_QUERY_DESC_IDN_STRING, | ||
704 | + }; | ||
705 | + desc.UC[0] = cpu_to_be16('Q'); | ||
706 | + desc.UC[1] = cpu_to_be16('E'); | ||
707 | + desc.UC[2] = cpu_to_be16('M'); | ||
708 | + desc.UC[3] = cpu_to_be16('U'); | ||
709 | + desc.UC[4] = cpu_to_be16(' '); | ||
710 | + desc.UC[5] = cpu_to_be16('U'); | ||
711 | + desc.UC[6] = cpu_to_be16('F'); | ||
712 | + desc.UC[7] = cpu_to_be16('S'); | ||
713 | + return desc; | ||
714 | +} | ||
715 | + | ||
716 | +static inline StringDescriptor product_rev_level_str_desc(void) | ||
717 | +{ | ||
718 | + StringDescriptor desc = { | ||
719 | + .length = 0x0a, | ||
720 | + .descriptor_idn = UFS_QUERY_DESC_IDN_STRING, | ||
721 | + }; | ||
722 | + desc.UC[0] = cpu_to_be16('0'); | ||
723 | + desc.UC[1] = cpu_to_be16('0'); | ||
724 | + desc.UC[2] = cpu_to_be16('0'); | ||
725 | + desc.UC[3] = cpu_to_be16('1'); | ||
726 | + return desc; | ||
727 | +} | ||
728 | + | ||
729 | +static const StringDescriptor null_str_desc = { | ||
730 | + .length = 0x02, | ||
731 | + .descriptor_idn = UFS_QUERY_DESC_IDN_STRING, | ||
732 | +}; | ||
733 | + | ||
734 | +static QueryRespCode ufs_read_string_desc(UfsRequest *req) | ||
735 | +{ | ||
736 | + UfsHc *u = req->hc; | ||
737 | + uint8_t index = req->req_upiu.qr.index; | ||
738 | + StringDescriptor desc; | ||
739 | + | ||
740 | + if (index == u->device_desc.manufacturer_name) { | ||
741 | + desc = manufacturer_str_desc(); | ||
742 | + memcpy(&req->rsp_upiu.qr.data, &desc, desc.length); | ||
743 | + } else if (index == u->device_desc.product_name) { | ||
744 | + desc = product_name_str_desc(); | ||
745 | + memcpy(&req->rsp_upiu.qr.data, &desc, desc.length); | ||
746 | + } else if (index == u->device_desc.serial_number) { | ||
747 | + memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length); | ||
748 | + } else if (index == u->device_desc.oem_id) { | ||
749 | + memcpy(&req->rsp_upiu.qr.data, &null_str_desc, null_str_desc.length); | ||
750 | + } else if (index == u->device_desc.product_revision_level) { | ||
751 | + desc = product_rev_level_str_desc(); | ||
752 | + memcpy(&req->rsp_upiu.qr.data, &desc, desc.length); | ||
753 | + } else { | ||
754 | + trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, index); | ||
755 | + return UFS_QUERY_RESULT_INVALID_INDEX; | ||
756 | + } | ||
757 | + return UFS_QUERY_RESULT_SUCCESS; | ||
758 | +} | ||
759 | + | ||
760 | +static inline InterconnectDescriptor interconnect_desc(void) | ||
761 | +{ | ||
762 | + InterconnectDescriptor desc = { | ||
763 | + .length = sizeof(InterconnectDescriptor), | ||
764 | + .descriptor_idn = UFS_QUERY_DESC_IDN_INTERCONNECT, | ||
765 | + }; | ||
766 | + desc.bcd_unipro_version = cpu_to_be16(0x180); | ||
767 | + desc.bcd_mphy_version = cpu_to_be16(0x410); | ||
768 | + return desc; | ||
769 | +} | ||
770 | + | ||
771 | +static QueryRespCode ufs_read_desc(UfsRequest *req) | ||
772 | +{ | ||
773 | + UfsHc *u = req->hc; | ||
774 | + QueryRespCode status; | ||
775 | + uint8_t idn = req->req_upiu.qr.idn; | ||
776 | + uint16_t length = be16_to_cpu(req->req_upiu.qr.length); | ||
777 | + InterconnectDescriptor desc; | ||
778 | + | ||
779 | + switch (idn) { | ||
780 | + case UFS_QUERY_DESC_IDN_DEVICE: | ||
781 | + memcpy(&req->rsp_upiu.qr.data, &u->device_desc, sizeof(u->device_desc)); | ||
782 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
783 | + break; | ||
784 | + case UFS_QUERY_DESC_IDN_UNIT: | ||
785 | + status = ufs_read_unit_desc(req); | ||
786 | + break; | ||
787 | + case UFS_QUERY_DESC_IDN_GEOMETRY: | ||
788 | + memcpy(&req->rsp_upiu.qr.data, &u->geometry_desc, | ||
789 | + sizeof(u->geometry_desc)); | ||
790 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
791 | + break; | ||
792 | + case UFS_QUERY_DESC_IDN_INTERCONNECT: { | ||
793 | + desc = interconnect_desc(); | ||
794 | + memcpy(&req->rsp_upiu.qr.data, &desc, sizeof(InterconnectDescriptor)); | ||
795 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
796 | + break; | ||
797 | + } | ||
798 | + case UFS_QUERY_DESC_IDN_STRING: | ||
799 | + status = ufs_read_string_desc(req); | ||
800 | + break; | ||
801 | + case UFS_QUERY_DESC_IDN_POWER: | ||
802 | + /* mocking of power descriptor is not supported */ | ||
803 | + memset(&req->rsp_upiu.qr.data, 0, sizeof(PowerParametersDescriptor)); | ||
804 | + req->rsp_upiu.qr.data[0] = sizeof(PowerParametersDescriptor); | ||
805 | + req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_POWER; | ||
806 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
807 | + break; | ||
808 | + case UFS_QUERY_DESC_IDN_HEALTH: | ||
809 | + /* mocking of health descriptor is not supported */ | ||
810 | + memset(&req->rsp_upiu.qr.data, 0, sizeof(DeviceHealthDescriptor)); | ||
811 | + req->rsp_upiu.qr.data[0] = sizeof(DeviceHealthDescriptor); | ||
812 | + req->rsp_upiu.qr.data[1] = UFS_QUERY_DESC_IDN_HEALTH; | ||
813 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
814 | + break; | ||
815 | + default: | ||
816 | + length = 0; | ||
817 | + trace_ufs_err_query_invalid_idn(req->req_upiu.qr.opcode, idn); | ||
818 | + status = UFS_QUERY_RESULT_INVALID_IDN; | ||
819 | + } | ||
820 | + | ||
821 | + if (length > req->rsp_upiu.qr.data[0]) { | ||
822 | + length = req->rsp_upiu.qr.data[0]; | ||
823 | + } | ||
824 | + req->rsp_upiu.qr.opcode = req->req_upiu.qr.opcode; | ||
825 | + req->rsp_upiu.qr.idn = req->req_upiu.qr.idn; | ||
826 | + req->rsp_upiu.qr.index = req->req_upiu.qr.index; | ||
827 | + req->rsp_upiu.qr.selector = req->req_upiu.qr.selector; | ||
828 | + req->rsp_upiu.qr.length = cpu_to_be16(length); | ||
829 | + | ||
830 | + return status; | ||
831 | +} | ||
832 | + | ||
833 | +static QueryRespCode ufs_exec_query_read(UfsRequest *req) | ||
834 | +{ | ||
835 | + QueryRespCode status; | ||
836 | + switch (req->req_upiu.qr.opcode) { | ||
837 | + case UFS_UPIU_QUERY_OPCODE_NOP: | ||
838 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
839 | + break; | ||
840 | + case UFS_UPIU_QUERY_OPCODE_READ_DESC: | ||
841 | + status = ufs_read_desc(req); | ||
842 | + break; | ||
843 | + case UFS_UPIU_QUERY_OPCODE_READ_ATTR: | ||
844 | + status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_READ); | ||
845 | + break; | ||
846 | + case UFS_UPIU_QUERY_OPCODE_READ_FLAG: | ||
847 | + status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_READ); | ||
848 | + break; | ||
849 | + default: | ||
850 | + trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode); | ||
851 | + status = UFS_QUERY_RESULT_INVALID_OPCODE; | ||
852 | + break; | ||
853 | + } | ||
854 | + | ||
855 | + return status; | ||
856 | +} | ||
857 | + | ||
858 | +static QueryRespCode ufs_exec_query_write(UfsRequest *req) | ||
859 | +{ | ||
860 | + QueryRespCode status; | ||
861 | + switch (req->req_upiu.qr.opcode) { | ||
862 | + case UFS_UPIU_QUERY_OPCODE_NOP: | ||
863 | + status = UFS_QUERY_RESULT_SUCCESS; | ||
864 | + break; | ||
865 | + case UFS_UPIU_QUERY_OPCODE_WRITE_DESC: | ||
866 | + /* write descriptor is not supported */ | ||
867 | + status = UFS_QUERY_RESULT_NOT_WRITEABLE; | ||
868 | + break; | ||
869 | + case UFS_UPIU_QUERY_OPCODE_WRITE_ATTR: | ||
870 | + status = ufs_exec_query_attr(req, UFS_QUERY_ATTR_WRITE); | ||
871 | + break; | ||
872 | + case UFS_UPIU_QUERY_OPCODE_SET_FLAG: | ||
873 | + status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_SET); | ||
874 | + break; | ||
875 | + case UFS_UPIU_QUERY_OPCODE_CLEAR_FLAG: | ||
876 | + status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_CLEAR); | ||
877 | + break; | ||
878 | + case UFS_UPIU_QUERY_OPCODE_TOGGLE_FLAG: | ||
879 | + status = ufs_exec_query_flag(req, UFS_QUERY_FLAG_TOGGLE); | ||
880 | + break; | ||
881 | + default: | ||
882 | + trace_ufs_err_query_invalid_opcode(req->req_upiu.qr.opcode); | ||
883 | + status = UFS_QUERY_RESULT_INVALID_OPCODE; | ||
884 | + break; | ||
885 | + } | ||
886 | + | ||
887 | + return status; | ||
888 | +} | ||
889 | + | ||
890 | +static UfsReqResult ufs_exec_query_cmd(UfsRequest *req) | ||
891 | +{ | ||
892 | + uint8_t query_func = req->req_upiu.header.query_func; | ||
893 | + uint16_t data_segment_length; | ||
894 | + QueryRespCode status; | ||
895 | + | ||
896 | + trace_ufs_exec_query_cmd(req->slot, req->req_upiu.qr.opcode); | ||
897 | + if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_READ_REQUEST) { | ||
898 | + status = ufs_exec_query_read(req); | ||
899 | + } else if (query_func == UFS_UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST) { | ||
900 | + status = ufs_exec_query_write(req); | ||
901 | + } else { | ||
902 | + status = UFS_QUERY_RESULT_GENERAL_FAILURE; | ||
903 | + } | ||
904 | + | ||
905 | + data_segment_length = be16_to_cpu(req->rsp_upiu.qr.length); | ||
906 | + ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_QUERY_RSP, 0, status, 0, | ||
907 | + data_segment_length); | ||
908 | + | ||
909 | + if (status != UFS_QUERY_RESULT_SUCCESS) { | ||
910 | + return UFS_REQUEST_FAIL; | ||
911 | + } | ||
912 | + return UFS_REQUEST_SUCCESS; | ||
913 | +} | ||
914 | + | ||
915 | +static void ufs_exec_req(UfsRequest *req) | ||
916 | +{ | ||
917 | + UfsReqResult req_result; | ||
918 | + | ||
919 | + if (ufs_dma_read_upiu(req)) { | ||
920 | + return; | ||
921 | + } | ||
922 | + | ||
923 | + switch (req->req_upiu.header.trans_type) { | ||
924 | + case UFS_UPIU_TRANSACTION_NOP_OUT: | ||
925 | + req_result = ufs_exec_nop_cmd(req); | ||
926 | + break; | ||
927 | + case UFS_UPIU_TRANSACTION_COMMAND: | ||
928 | + /* Not yet implemented */ | ||
929 | + req_result = UFS_REQUEST_FAIL; | ||
930 | + break; | ||
931 | + case UFS_UPIU_TRANSACTION_QUERY_REQ: | ||
932 | + req_result = ufs_exec_query_cmd(req); | ||
933 | + break; | ||
934 | + default: | ||
935 | + trace_ufs_err_invalid_trans_code(req->slot, | ||
936 | + req->req_upiu.header.trans_type); | ||
937 | + req_result = UFS_REQUEST_FAIL; | ||
938 | + } | ||
939 | + | ||
940 | + ufs_complete_req(req, req_result); | ||
941 | +} | ||
942 | + | ||
943 | +static void ufs_process_req(void *opaque) | ||
944 | +{ | ||
945 | + UfsHc *u = opaque; | ||
946 | + UfsRequest *req; | ||
947 | + int slot; | ||
948 | + | ||
949 | + for (slot = 0; slot < u->params.nutrs; slot++) { | ||
950 | + req = &u->req_list[slot]; | ||
951 | + | ||
952 | + if (req->state != UFS_REQUEST_READY) { | ||
953 | + continue; | ||
954 | + } | ||
955 | + trace_ufs_process_req(slot); | ||
956 | + req->state = UFS_REQUEST_RUNNING; | ||
957 | + | ||
958 | + ufs_exec_req(req); | ||
959 | + } | ||
960 | +} | ||
961 | + | ||
962 | +static void ufs_complete_req(UfsRequest *req, UfsReqResult req_result) | ||
963 | +{ | ||
964 | + UfsHc *u = req->hc; | ||
965 | + assert(req->state == UFS_REQUEST_RUNNING); | ||
966 | + | ||
967 | + if (req_result == UFS_REQUEST_SUCCESS) { | ||
968 | + req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_SUCCESS); | ||
969 | + } else { | ||
970 | + req->utrd.header.dword_2 = cpu_to_le32(UFS_OCS_INVALID_CMD_TABLE_ATTR); | ||
971 | + } | ||
972 | + | ||
973 | + trace_ufs_complete_req(req->slot); | ||
974 | + req->state = UFS_REQUEST_COMPLETE; | ||
975 | + qemu_bh_schedule(u->complete_bh); | ||
976 | +} | ||
977 | + | ||
978 | +static void ufs_clear_req(UfsRequest *req) | ||
979 | +{ | ||
980 | + if (req->sg != NULL) { | ||
981 | + qemu_sglist_destroy(req->sg); | ||
982 | + g_free(req->sg); | ||
983 | + req->sg = NULL; | ||
984 | + } | ||
985 | + | ||
986 | + memset(&req->utrd, 0, sizeof(req->utrd)); | ||
987 | + memset(&req->req_upiu, 0, sizeof(req->req_upiu)); | ||
988 | + memset(&req->rsp_upiu, 0, sizeof(req->rsp_upiu)); | ||
989 | +} | ||
990 | + | ||
991 | +static void ufs_sendback_req(void *opaque) | ||
992 | +{ | ||
993 | + UfsHc *u = opaque; | ||
994 | + UfsRequest *req; | ||
995 | + int slot; | ||
996 | + | ||
997 | + for (slot = 0; slot < u->params.nutrs; slot++) { | ||
998 | + req = &u->req_list[slot]; | ||
999 | + | ||
1000 | + if (req->state != UFS_REQUEST_COMPLETE) { | ||
1001 | + continue; | ||
1002 | + } | ||
1003 | + | ||
1004 | + if (ufs_dma_write_upiu(req)) { | ||
1005 | + req->state = UFS_REQUEST_ERROR; | ||
1006 | + continue; | ||
1007 | + } | ||
1008 | + | ||
1009 | + /* | ||
1010 | + * TODO: UTP Transfer Request Interrupt Aggregation Control is not yet | ||
1011 | + * supported | ||
1012 | + */ | ||
1013 | + if (le32_to_cpu(req->utrd.header.dword_2) != UFS_OCS_SUCCESS || | ||
1014 | + le32_to_cpu(req->utrd.header.dword_0) & UFS_UTP_REQ_DESC_INT_CMD) { | ||
1015 | + u->reg.is = FIELD_DP32(u->reg.is, IS, UTRCS, 1); | ||
1016 | + } | ||
1017 | + | ||
1018 | + u->reg.utrldbr &= ~(1 << slot); | ||
1019 | + u->reg.utrlcnr |= (1 << slot); | ||
1020 | + | ||
1021 | + trace_ufs_sendback_req(req->slot); | ||
1022 | + | ||
1023 | + ufs_clear_req(req); | ||
1024 | + req->state = UFS_REQUEST_IDLE; | ||
1025 | + } | ||
1026 | + | ||
1027 | + ufs_irq_check(u); | ||
1028 | +} | ||
1029 | + | ||
1030 | static bool ufs_check_constraints(UfsHc *u, Error **errp) | ||
1031 | { | ||
1032 | if (u->params.nutrs > UFS_MAX_NUTRS) { | ||
1033 | @@ -XXX,XX +XXX,XX @@ static void ufs_init_pci(UfsHc *u, PCIDevice *pci_dev) | ||
1034 | u->irq = pci_allocate_irq(pci_dev); | ||
1035 | } | 66 | } |
1036 | 67 | ||
1037 | +static void ufs_init_state(UfsHc *u) | 68 | /** |
1038 | +{ | 69 | @@ -XXX,XX +XXX,XX @@ static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset) |
1039 | + u->req_list = g_new0(UfsRequest, u->params.nutrs); | 70 | if (need_alloc) { |
1040 | + | 71 | /* Write out the whole new L2 table */ |
1041 | + for (int i = 0; i < u->params.nutrs; i++) { | 72 | qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true, |
1042 | + u->req_list[i].hc = u; | 73 | - qed_aio_write_l1_update, acb); |
1043 | + u->req_list[i].slot = i; | 74 | + qed_aio_write_l1_update, acb); |
1044 | + u->req_list[i].sg = NULL; | 75 | } else { |
1045 | + u->req_list[i].state = UFS_REQUEST_IDLE; | 76 | /* Write out only the updated part of the L2 table */ |
1046 | + } | 77 | qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false, |
1047 | + | 78 | - qed_aio_next_io, acb); |
1048 | + u->doorbell_bh = qemu_bh_new_guarded(ufs_process_req, u, | 79 | + qed_aio_next_io_cb, acb); |
1049 | + &DEVICE(u)->mem_reentrancy_guard); | 80 | } |
1050 | + u->complete_bh = qemu_bh_new_guarded(ufs_sendback_req, u, | 81 | return; |
1051 | + &DEVICE(u)->mem_reentrancy_guard); | 82 | |
1052 | +} | 83 | @@ -XXX,XX +XXX,XX @@ static void qed_aio_write_main(void *opaque, int ret) |
1053 | + | 84 | } |
1054 | static void ufs_init_hc(UfsHc *u) | 85 | |
1055 | { | 86 | if (acb->find_cluster_ret == QED_CLUSTER_FOUND) { |
1056 | uint32_t cap = 0; | 87 | - next_fn = qed_aio_next_io; |
1057 | @@ -XXX,XX +XXX,XX @@ static void ufs_init_hc(UfsHc *u) | 88 | + next_fn = qed_aio_next_io_cb; |
1058 | cap = FIELD_DP32(cap, CAP, CS, 0); | 89 | } else { |
1059 | u->reg.cap = cap; | 90 | if (s->bs->backing) { |
1060 | u->reg.ver = UFS_SPEC_VER; | 91 | next_fn = qed_aio_write_flush_before_l2_update; |
1061 | + | 92 | @@ -XXX,XX +XXX,XX @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len) |
1062 | + memset(&u->device_desc, 0, sizeof(DeviceDescriptor)); | 93 | if (acb->flags & QED_AIOCB_ZERO) { |
1063 | + u->device_desc.length = sizeof(DeviceDescriptor); | 94 | /* Skip ahead if the clusters are already zero */ |
1064 | + u->device_desc.descriptor_idn = UFS_QUERY_DESC_IDN_DEVICE; | 95 | if (acb->find_cluster_ret == QED_CLUSTER_ZERO) { |
1065 | + u->device_desc.device_sub_class = 0x01; | 96 | - qed_aio_next_io(acb, 0); |
1066 | + u->device_desc.number_lu = 0x00; | 97 | + qed_aio_start_io(acb); |
1067 | + u->device_desc.number_wlu = 0x04; | 98 | return; |
1068 | + /* TODO: Revisit it when Power Management is implemented */ | 99 | } |
1069 | + u->device_desc.init_power_mode = 0x01; /* Active Mode */ | 100 | |
1070 | + u->device_desc.high_priority_lun = 0x7F; /* Same Priority */ | 101 | @@ -XXX,XX +XXX,XX @@ static void qed_aio_read_data(void *opaque, int ret, |
1071 | + u->device_desc.spec_version = cpu_to_be16(UFS_SPEC_VER); | 102 | /* Handle zero cluster and backing file reads */ |
1072 | + u->device_desc.manufacturer_name = 0x00; | 103 | if (ret == QED_CLUSTER_ZERO) { |
1073 | + u->device_desc.product_name = 0x01; | 104 | qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size); |
1074 | + u->device_desc.serial_number = 0x02; | 105 | - qed_aio_next_io(acb, 0); |
1075 | + u->device_desc.oem_id = 0x03; | 106 | + qed_aio_start_io(acb); |
1076 | + u->device_desc.ud_0_base_offset = 0x16; | 107 | return; |
1077 | + u->device_desc.ud_config_p_length = 0x1A; | 108 | } else if (ret != QED_CLUSTER_FOUND) { |
1078 | + u->device_desc.device_rtt_cap = 0x02; | 109 | qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov, |
1079 | + u->device_desc.queue_depth = u->params.nutrs; | 110 | - &acb->backing_qiov, qed_aio_next_io, acb); |
1080 | + u->device_desc.product_revision_level = 0x04; | 111 | + &acb->backing_qiov, qed_aio_next_io_cb, acb); |
1081 | + | ||
1082 | + memset(&u->geometry_desc, 0, sizeof(GeometryDescriptor)); | ||
1083 | + u->geometry_desc.length = sizeof(GeometryDescriptor); | ||
1084 | + u->geometry_desc.descriptor_idn = UFS_QUERY_DESC_IDN_GEOMETRY; | ||
1085 | + u->geometry_desc.max_number_lu = (UFS_MAX_LUS == 32) ? 0x1 : 0x0; | ||
1086 | + u->geometry_desc.segment_size = cpu_to_be32(0x2000); /* 4KB */ | ||
1087 | + u->geometry_desc.allocation_unit_size = 0x1; /* 4KB */ | ||
1088 | + u->geometry_desc.min_addr_block_size = 0x8; /* 4KB */ | ||
1089 | + u->geometry_desc.max_in_buffer_size = 0x8; | ||
1090 | + u->geometry_desc.max_out_buffer_size = 0x8; | ||
1091 | + u->geometry_desc.rpmb_read_write_size = 0x40; | ||
1092 | + u->geometry_desc.data_ordering = | ||
1093 | + 0x0; /* out-of-order data transfer is not supported */ | ||
1094 | + u->geometry_desc.max_context_id_number = 0x5; | ||
1095 | + u->geometry_desc.supported_memory_types = cpu_to_be16(0x8001); | ||
1096 | + | ||
1097 | + memset(&u->attributes, 0, sizeof(u->attributes)); | ||
1098 | + u->attributes.max_data_in_size = 0x08; | ||
1099 | + u->attributes.max_data_out_size = 0x08; | ||
1100 | + u->attributes.ref_clk_freq = 0x01; /* 26 MHz */ | ||
1101 | + /* configure descriptor is not supported */ | ||
1102 | + u->attributes.config_descr_lock = 0x01; | ||
1103 | + u->attributes.max_num_of_rtt = 0x02; | ||
1104 | + | ||
1105 | + memset(&u->flags, 0, sizeof(u->flags)); | ||
1106 | + u->flags.permanently_disable_fw_update = 1; | ||
1107 | } | ||
1108 | |||
1109 | static void ufs_realize(PCIDevice *pci_dev, Error **errp) | ||
1110 | @@ -XXX,XX +XXX,XX @@ static void ufs_realize(PCIDevice *pci_dev, Error **errp) | ||
1111 | return; | 112 | return; |
1112 | } | 113 | } |
1113 | 114 | ||
1114 | + ufs_init_state(u); | 115 | BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); |
1115 | ufs_init_hc(u); | 116 | bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE, |
1116 | ufs_init_pci(u, pci_dev); | 117 | &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE, |
118 | - qed_aio_next_io, acb); | ||
119 | + qed_aio_next_io_cb, acb); | ||
120 | return; | ||
121 | |||
122 | err: | ||
123 | @@ -XXX,XX +XXX,XX @@ err: | ||
124 | /** | ||
125 | * Begin next I/O or complete the request | ||
126 | */ | ||
127 | -static void qed_aio_next_io(void *opaque, int ret) | ||
128 | +static void qed_aio_next_io(QEDAIOCB *acb, int ret) | ||
129 | { | ||
130 | - QEDAIOCB *acb = opaque; | ||
131 | BDRVQEDState *s = acb_to_s(acb); | ||
132 | QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ? | ||
133 | qed_aio_write_data : qed_aio_read_data; | ||
134 | @@ -XXX,XX +XXX,XX @@ static BlockAIOCB *qed_aio_setup(BlockDriverState *bs, | ||
135 | qemu_iovec_init(&acb->cur_qiov, qiov->niov); | ||
136 | |||
137 | /* Start request */ | ||
138 | - qed_aio_next_io(acb, 0); | ||
139 | + qed_aio_start_io(acb); | ||
140 | return &acb->common; | ||
1117 | } | 141 | } |
1118 | 142 | ||
1119 | +static void ufs_exit(PCIDevice *pci_dev) | ||
1120 | +{ | ||
1121 | + UfsHc *u = UFS(pci_dev); | ||
1122 | + | ||
1123 | + qemu_bh_delete(u->doorbell_bh); | ||
1124 | + qemu_bh_delete(u->complete_bh); | ||
1125 | + | ||
1126 | + for (int i = 0; i < u->params.nutrs; i++) { | ||
1127 | + ufs_clear_req(&u->req_list[i]); | ||
1128 | + } | ||
1129 | + g_free(u->req_list); | ||
1130 | +} | ||
1131 | + | ||
1132 | static Property ufs_props[] = { | ||
1133 | DEFINE_PROP_STRING("serial", UfsHc, params.serial), | ||
1134 | DEFINE_PROP_UINT8("nutrs", UfsHc, params.nutrs, 32), | ||
1135 | @@ -XXX,XX +XXX,XX @@ static void ufs_class_init(ObjectClass *oc, void *data) | ||
1136 | PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc); | ||
1137 | |||
1138 | pc->realize = ufs_realize; | ||
1139 | + pc->exit = ufs_exit; | ||
1140 | pc->vendor_id = PCI_VENDOR_ID_REDHAT; | ||
1141 | pc->device_id = PCI_DEVICE_ID_REDHAT_UFS; | ||
1142 | pc->class_id = PCI_CLASS_STORAGE_UFS; | ||
1143 | diff --git a/hw/ufs/trace-events b/hw/ufs/trace-events | ||
1144 | index XXXXXXX..XXXXXXX 100644 | ||
1145 | --- a/hw/ufs/trace-events | ||
1146 | +++ b/hw/ufs/trace-events | ||
1147 | @@ -XXX,XX +XXX,XX @@ ufs_err_dma_read_req_upiu(uint32_t slot, uint64_t addr) "failed to read req upiu | ||
1148 | ufs_err_dma_read_prdt(uint32_t slot, uint64_t addr) "failed to read prdt. UTRLDBR slot %"PRIu32", prdt addr %"PRIu64"" | ||
1149 | ufs_err_dma_write_utrd(uint32_t slot, uint64_t addr) "failed to write utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64"" | ||
1150 | ufs_err_dma_write_rsp_upiu(uint32_t slot, uint64_t addr) "failed to write rsp upiu. UTRLDBR slot %"PRIu32", response upiu addr %"PRIu64"" | ||
1151 | +ufs_err_utrl_slot_error(uint32_t slot) "UTRLDBR slot %"PRIu32" is in error" | ||
1152 | ufs_err_utrl_slot_busy(uint32_t slot) "UTRLDBR slot %"PRIu32" is busy" | ||
1153 | ufs_err_unsupport_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is not yet supported" | ||
1154 | ufs_err_invalid_register_offset(uint32_t offset) "Register offset 0x%"PRIx32" is invalid" | ||
1155 | -- | 143 | -- |
1156 | 2.41.0 | 144 | 2.9.3 |
145 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | The AioContext data structures are now protected by list_lock and/or | ||
4 | they are walked with FOREACH_RCU primitives. There is no longer any | ||
5 | need to acquire the AioContext for the entire duration of aio_dispatch. | ||
6 | Instead, acquire it just before and release it just after each callback. | ||
7 | The next step is then to push it further down. | ||
8 | |||
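In other words, the dispatch loop now brackets each callback with the
lock instead of holding it across the whole loop. A minimal sketch of
the pattern, assuming a simplified handler type with only a read
callback (the real code lives in util/aio-posix.c and util/aio-win32.c):

    /* Sketch only: a toy Handler type, not the real AioHandler. */
    typedef struct Handler {
        void (*io_read)(void *opaque);
        void *opaque;
    } Handler;

    static void dispatch_one(AioContext *ctx, Handler *node)
    {
        /* Take the AioContext lock just around the callback
         * invocation, rather than for all of aio_dispatch(). */
        aio_context_acquire(ctx);
        node->io_read(node->opaque);
        aio_context_release(ctx);
    }
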
9 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
11 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
12 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
13 | Message-id: 20170213135235.12274-12-pbonzini@redhat.com | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | --- | ||
16 | util/aio-posix.c | 25 +++++++++++-------------- | ||
17 | util/aio-win32.c | 15 +++++++-------- | ||
18 | util/async.c | 2 ++ | ||
19 | 3 files changed, 20 insertions(+), 22 deletions(-) | ||
20 | |||
21 | diff --git a/util/aio-posix.c b/util/aio-posix.c | ||
22 | index XXXXXXX..XXXXXXX 100644 | ||
23 | --- a/util/aio-posix.c | ||
24 | +++ b/util/aio-posix.c | ||
25 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) | ||
26 | (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && | ||
27 | aio_node_check(ctx, node->is_external) && | ||
28 | node->io_read) { | ||
29 | + aio_context_acquire(ctx); | ||
30 | node->io_read(node->opaque); | ||
31 | + aio_context_release(ctx); | ||
32 | |||
33 | /* aio_notify() does not count as progress */ | ||
34 | if (node->opaque != &ctx->notifier) { | ||
35 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) | ||
36 | (revents & (G_IO_OUT | G_IO_ERR)) && | ||
37 | aio_node_check(ctx, node->is_external) && | ||
38 | node->io_write) { | ||
39 | + aio_context_acquire(ctx); | ||
40 | node->io_write(node->opaque); | ||
41 | + aio_context_release(ctx); | ||
42 | progress = true; | ||
43 | } | ||
44 | |||
45 | @@ -XXX,XX +XXX,XX @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds) | ||
46 | } | ||
47 | |||
48 | /* Run our timers */ | ||
49 | + aio_context_acquire(ctx); | ||
50 | progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
51 | + aio_context_release(ctx); | ||
52 | |||
53 | return progress; | ||
54 | } | ||
55 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
56 | int64_t timeout; | ||
57 | int64_t start = 0; | ||
58 | |||
59 | - aio_context_acquire(ctx); | ||
60 | - progress = false; | ||
61 | - | ||
62 | /* aio_notify can avoid the expensive event_notifier_set if | ||
63 | * everything (file descriptors, bottom halves, timers) will | ||
64 | * be re-evaluated before the next blocking poll(). This is | ||
65 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
66 | start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | ||
67 | } | ||
68 | |||
69 | - if (try_poll_mode(ctx, blocking)) { | ||
70 | - progress = true; | ||
71 | - } else { | ||
72 | + aio_context_acquire(ctx); | ||
73 | + progress = try_poll_mode(ctx, blocking); | ||
74 | + aio_context_release(ctx); | ||
75 | + | ||
76 | + if (!progress) { | ||
77 | assert(npfd == 0); | ||
78 | |||
79 | /* fill pollfds */ | ||
80 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
81 | timeout = blocking ? aio_compute_timeout(ctx) : 0; | ||
82 | |||
83 | /* wait until next event */ | ||
84 | - if (timeout) { | ||
85 | - aio_context_release(ctx); | ||
86 | - } | ||
87 | if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) { | ||
88 | AioHandler epoll_handler; | ||
89 | |||
90 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
91 | } else { | ||
92 | ret = qemu_poll_ns(pollfds, npfd, timeout); | ||
93 | } | ||
94 | - if (timeout) { | ||
95 | - aio_context_acquire(ctx); | ||
96 | - } | ||
97 | } | ||
98 | |||
99 | if (blocking) { | ||
100 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
101 | progress = true; | ||
102 | } | ||
103 | |||
104 | - aio_context_release(ctx); | ||
105 | - | ||
106 | return progress; | ||
107 | } | ||
108 | |||
109 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
110 | index XXXXXXX..XXXXXXX 100644 | ||
111 | --- a/util/aio-win32.c | ||
112 | +++ b/util/aio-win32.c | ||
113 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
114 | (revents || event_notifier_get_handle(node->e) == event) && | ||
115 | node->io_notify) { | ||
116 | node->pfd.revents = 0; | ||
117 | + aio_context_acquire(ctx); | ||
118 | node->io_notify(node->e); | ||
119 | + aio_context_release(ctx); | ||
120 | |||
121 | /* aio_notify() does not count as progress */ | ||
122 | if (node->e != &ctx->notifier) { | ||
123 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
124 | (node->io_read || node->io_write)) { | ||
125 | node->pfd.revents = 0; | ||
126 | if ((revents & G_IO_IN) && node->io_read) { | ||
127 | + aio_context_acquire(ctx); | ||
128 | node->io_read(node->opaque); | ||
129 | + aio_context_release(ctx); | ||
130 | progress = true; | ||
131 | } | ||
132 | if ((revents & G_IO_OUT) && node->io_write) { | ||
133 | + aio_context_acquire(ctx); | ||
134 | node->io_write(node->opaque); | ||
135 | + aio_context_release(ctx); | ||
136 | progress = true; | ||
137 | } | ||
138 | |||
139 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
140 | int count; | ||
141 | int timeout; | ||
142 | |||
143 | - aio_context_acquire(ctx); | ||
144 | progress = false; | ||
145 | |||
146 | /* aio_notify can avoid the expensive event_notifier_set if | ||
147 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
148 | |||
149 | timeout = blocking && !have_select_revents | ||
150 | ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0; | ||
151 | - if (timeout) { | ||
152 | - aio_context_release(ctx); | ||
153 | - } | ||
154 | ret = WaitForMultipleObjects(count, events, FALSE, timeout); | ||
155 | if (blocking) { | ||
156 | assert(first); | ||
157 | atomic_sub(&ctx->notify_me, 2); | ||
158 | } | ||
159 | - if (timeout) { | ||
160 | - aio_context_acquire(ctx); | ||
161 | - } | ||
162 | |||
163 | if (first) { | ||
164 | aio_notify_accept(ctx); | ||
165 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
166 | progress |= aio_dispatch_handlers(ctx, event); | ||
167 | } while (count > 0); | ||
168 | |||
169 | + aio_context_acquire(ctx); | ||
170 | progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
171 | - | ||
172 | aio_context_release(ctx); | ||
173 | return progress; | ||
174 | } | ||
175 | diff --git a/util/async.c b/util/async.c | ||
176 | index XXXXXXX..XXXXXXX 100644 | ||
177 | --- a/util/async.c | ||
178 | +++ b/util/async.c | ||
179 | @@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx) | ||
180 | ret = 1; | ||
181 | } | ||
182 | bh->idle = 0; | ||
183 | + aio_context_acquire(ctx); | ||
184 | aio_bh_call(bh); | ||
185 | + aio_context_release(ctx); | ||
186 | } | ||
187 | if (bh->deleted) { | ||
188 | deleted = true; | ||
189 | -- | ||
190 | 2.9.3 | ||
191 | |||
New patch | |||
---|---|---|---|
1 | 1 | From: Paolo Bonzini <pbonzini@redhat.com> | |
2 | |||
3 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
4 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
5 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
6 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
7 | Message-id: 20170213135235.12274-13-pbonzini@redhat.com | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | block/qed.h | 3 +++ | ||
11 | block/curl.c | 2 ++ | ||
12 | block/io.c | 5 +++++ | ||
13 | block/iscsi.c | 8 ++++++-- | ||
14 | block/null.c | 4 ++++ | ||
15 | block/qed.c | 12 ++++++++++++ | ||
16 | block/throttle-groups.c | 2 ++ | ||
17 | util/aio-posix.c | 2 -- | ||
18 | util/aio-win32.c | 2 -- | ||
19 | util/qemu-coroutine-sleep.c | 2 +- | ||
20 | 10 files changed, 35 insertions(+), 7 deletions(-) | ||
21 | |||
22 | diff --git a/block/qed.h b/block/qed.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/block/qed.h | ||
25 | +++ b/block/qed.h | ||
26 | @@ -XXX,XX +XXX,XX @@ enum { | ||
27 | */ | ||
28 | typedef void QEDFindClusterFunc(void *opaque, int ret, uint64_t offset, size_t len); | ||
29 | |||
30 | +void qed_acquire(BDRVQEDState *s); | ||
31 | +void qed_release(BDRVQEDState *s); | ||
32 | + | ||
33 | /** | ||
34 | * Generic callback for chaining async callbacks | ||
35 | */ | ||
36 | diff --git a/block/curl.c b/block/curl.c | ||
37 | index XXXXXXX..XXXXXXX 100644 | ||
38 | --- a/block/curl.c | ||
39 | +++ b/block/curl.c | ||
40 | @@ -XXX,XX +XXX,XX @@ static void curl_multi_timeout_do(void *arg) | ||
41 | return; | ||
42 | } | ||
43 | |||
44 | + aio_context_acquire(s->aio_context); | ||
45 | curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running); | ||
46 | |||
47 | curl_multi_check_completion(s); | ||
48 | + aio_context_release(s->aio_context); | ||
49 | #else | ||
50 | abort(); | ||
51 | #endif | ||
52 | diff --git a/block/io.c b/block/io.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/block/io.c | ||
55 | +++ b/block/io.c | ||
56 | @@ -XXX,XX +XXX,XX @@ void bdrv_aio_cancel(BlockAIOCB *acb) | ||
57 | if (acb->aiocb_info->get_aio_context) { | ||
58 | aio_poll(acb->aiocb_info->get_aio_context(acb), true); | ||
59 | } else if (acb->bs) { | ||
60 | + /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so | ||
61 | + * assert that we're not using an I/O thread. Thread-safe | ||
62 | + * code should use bdrv_aio_cancel_async exclusively. | ||
63 | + */ | ||
64 | + assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context()); | ||
65 | aio_poll(bdrv_get_aio_context(acb->bs), true); | ||
66 | } else { | ||
67 | abort(); | ||
68 | diff --git a/block/iscsi.c b/block/iscsi.c | ||
69 | index XXXXXXX..XXXXXXX 100644 | ||
70 | --- a/block/iscsi.c | ||
71 | +++ b/block/iscsi.c | ||
72 | @@ -XXX,XX +XXX,XX @@ static void iscsi_retry_timer_expired(void *opaque) | ||
73 | struct IscsiTask *iTask = opaque; | ||
74 | iTask->complete = 1; | ||
75 | if (iTask->co) { | ||
76 | - qemu_coroutine_enter(iTask->co); | ||
77 | + aio_co_wake(iTask->co); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | @@ -XXX,XX +XXX,XX @@ static void iscsi_nop_timed_event(void *opaque) | ||
82 | { | ||
83 | IscsiLun *iscsilun = opaque; | ||
84 | |||
85 | + aio_context_acquire(iscsilun->aio_context); | ||
86 | if (iscsi_get_nops_in_flight(iscsilun->iscsi) >= MAX_NOP_FAILURES) { | ||
87 | error_report("iSCSI: NOP timeout. Reconnecting..."); | ||
88 | iscsilun->request_timed_out = true; | ||
89 | } else if (iscsi_nop_out_async(iscsilun->iscsi, NULL, NULL, 0, NULL) != 0) { | ||
90 | error_report("iSCSI: failed to sent NOP-Out. Disabling NOP messages."); | ||
91 | - return; | ||
92 | + goto out; | ||
93 | } | ||
94 | |||
95 | timer_mod(iscsilun->nop_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + NOP_INTERVAL); | ||
96 | iscsi_set_events(iscsilun); | ||
97 | + | ||
98 | +out: | ||
99 | + aio_context_release(iscsilun->aio_context); | ||
100 | } | ||
101 | |||
102 | static void iscsi_readcapacity_sync(IscsiLun *iscsilun, Error **errp) | ||
103 | diff --git a/block/null.c b/block/null.c | ||
104 | index XXXXXXX..XXXXXXX 100644 | ||
105 | --- a/block/null.c | ||
106 | +++ b/block/null.c | ||
107 | @@ -XXX,XX +XXX,XX @@ static void null_bh_cb(void *opaque) | ||
108 | static void null_timer_cb(void *opaque) | ||
109 | { | ||
110 | NullAIOCB *acb = opaque; | ||
111 | + AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
112 | + | ||
113 | + aio_context_acquire(ctx); | ||
114 | acb->common.cb(acb->common.opaque, 0); | ||
115 | + aio_context_release(ctx); | ||
116 | timer_deinit(&acb->timer); | ||
117 | qemu_aio_unref(acb); | ||
118 | } | ||
119 | diff --git a/block/qed.c b/block/qed.c | ||
120 | index XXXXXXX..XXXXXXX 100644 | ||
121 | --- a/block/qed.c | ||
122 | +++ b/block/qed.c | ||
123 | @@ -XXX,XX +XXX,XX @@ static void qed_need_check_timer_cb(void *opaque) | ||
124 | |||
125 | trace_qed_need_check_timer_cb(s); | ||
126 | |||
127 | + qed_acquire(s); | ||
128 | qed_plug_allocating_write_reqs(s); | ||
129 | |||
130 | /* Ensure writes are on disk before clearing flag */ | ||
131 | bdrv_aio_flush(s->bs->file->bs, qed_clear_need_check, s); | ||
132 | + qed_release(s); | ||
133 | +} | ||
134 | + | ||
135 | +void qed_acquire(BDRVQEDState *s) | ||
136 | +{ | ||
137 | + aio_context_acquire(bdrv_get_aio_context(s->bs)); | ||
138 | +} | ||
139 | + | ||
140 | +void qed_release(BDRVQEDState *s) | ||
141 | +{ | ||
142 | + aio_context_release(bdrv_get_aio_context(s->bs)); | ||
143 | } | ||
144 | |||
145 | static void qed_start_need_check_timer(BDRVQEDState *s) | ||
146 | diff --git a/block/throttle-groups.c b/block/throttle-groups.c | ||
147 | index XXXXXXX..XXXXXXX 100644 | ||
148 | --- a/block/throttle-groups.c | ||
149 | +++ b/block/throttle-groups.c | ||
150 | @@ -XXX,XX +XXX,XX @@ static void timer_cb(BlockBackend *blk, bool is_write) | ||
151 | qemu_mutex_unlock(&tg->lock); | ||
152 | |||
153 | /* Run the request that was waiting for this timer */ | ||
154 | + aio_context_acquire(blk_get_aio_context(blk)); | ||
155 | empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]); | ||
156 | + aio_context_release(blk_get_aio_context(blk)); | ||
157 | |||
158 | /* If the request queue was empty then we have to take care of | ||
159 | * scheduling the next one */ | ||
160 | diff --git a/util/aio-posix.c b/util/aio-posix.c | ||
161 | index XXXXXXX..XXXXXXX 100644 | ||
162 | --- a/util/aio-posix.c | ||
163 | +++ b/util/aio-posix.c | ||
164 | @@ -XXX,XX +XXX,XX @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds) | ||
165 | } | ||
166 | |||
167 | /* Run our timers */ | ||
168 | - aio_context_acquire(ctx); | ||
169 | progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
170 | - aio_context_release(ctx); | ||
171 | |||
172 | return progress; | ||
173 | } | ||
174 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
175 | index XXXXXXX..XXXXXXX 100644 | ||
176 | --- a/util/aio-win32.c | ||
177 | +++ b/util/aio-win32.c | ||
178 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
179 | progress |= aio_dispatch_handlers(ctx, event); | ||
180 | } while (count > 0); | ||
181 | |||
182 | - aio_context_acquire(ctx); | ||
183 | progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
184 | - aio_context_release(ctx); | ||
185 | return progress; | ||
186 | } | ||
187 | |||
188 | diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c | ||
189 | index XXXXXXX..XXXXXXX 100644 | ||
190 | --- a/util/qemu-coroutine-sleep.c | ||
191 | +++ b/util/qemu-coroutine-sleep.c | ||
192 | @@ -XXX,XX +XXX,XX @@ static void co_sleep_cb(void *opaque) | ||
193 | { | ||
194 | CoSleepCB *sleep_cb = opaque; | ||
195 | |||
196 | - qemu_coroutine_enter(sleep_cb->co); | ||
197 | + aio_co_wake(sleep_cb->co); | ||
198 | } | ||
199 | |||
200 | void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type, | ||
201 | -- | ||
202 | 2.9.3 | ||
203 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | This covers both file descriptor callbacks and polling callbacks, | ||
4 | since they execute related code. | ||
5 | |||
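A minimal sketch of what the conversion looks like in one of the
affected drivers, assuming a hypothetical MyState with an aio_context
field (the actual changes are in the curl, iscsi, nfs, linux-aio and
related files below):

    /* Sketch only: a file-descriptor handler that now takes the
     * AioContext lock itself, since the dispatch loop no longer
     * holds it around the callback. */
    typedef struct MyState {
        AioContext *aio_context;
    } MyState;

    static void my_process_read(void *opaque)
    {
        MyState *s = opaque;

        aio_context_acquire(s->aio_context);
        /* ... service the descriptor and re-arm events ... */
        aio_context_release(s->aio_context);
    }
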
6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
8 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
9 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
10 | Message-id: 20170213135235.12274-14-pbonzini@redhat.com | ||
11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | --- | ||
13 | block/curl.c | 16 +++++++++++++--- | ||
14 | block/iscsi.c | 4 ++++ | ||
15 | block/linux-aio.c | 4 ++++ | ||
16 | block/nfs.c | 6 ++++++ | ||
17 | block/sheepdog.c | 29 +++++++++++++++-------------- | ||
18 | block/ssh.c | 29 +++++++++-------------------- | ||
19 | block/win32-aio.c | 10 ++++++---- | ||
20 | hw/block/virtio-blk.c | 5 ++++- | ||
21 | hw/scsi/virtio-scsi.c | 7 +++++++ | ||
22 | util/aio-posix.c | 7 ------- | ||
23 | util/aio-win32.c | 6 ------ | ||
24 | 11 files changed, 68 insertions(+), 55 deletions(-) | ||
25 | |||
26 | diff --git a/block/curl.c b/block/curl.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/block/curl.c | ||
29 | +++ b/block/curl.c | ||
30 | @@ -XXX,XX +XXX,XX @@ static void curl_multi_check_completion(BDRVCURLState *s) | ||
31 | } | ||
32 | } | ||
33 | |||
34 | -static void curl_multi_do(void *arg) | ||
35 | +static void curl_multi_do_locked(CURLState *s) | ||
36 | { | ||
37 | - CURLState *s = (CURLState *)arg; | ||
38 | CURLSocket *socket, *next_socket; | ||
39 | int running; | ||
40 | int r; | ||
41 | @@ -XXX,XX +XXX,XX @@ static void curl_multi_do(void *arg) | ||
42 | } | ||
43 | } | ||
44 | |||
45 | +static void curl_multi_do(void *arg) | ||
46 | +{ | ||
47 | + CURLState *s = (CURLState *)arg; | ||
48 | + | ||
49 | + aio_context_acquire(s->s->aio_context); | ||
50 | + curl_multi_do_locked(s); | ||
51 | + aio_context_release(s->s->aio_context); | ||
52 | +} | ||
53 | + | ||
54 | static void curl_multi_read(void *arg) | ||
55 | { | ||
56 | CURLState *s = (CURLState *)arg; | ||
57 | |||
58 | - curl_multi_do(arg); | ||
59 | + aio_context_acquire(s->s->aio_context); | ||
60 | + curl_multi_do_locked(s); | ||
61 | curl_multi_check_completion(s->s); | ||
62 | + aio_context_release(s->s->aio_context); | ||
63 | } | ||
64 | |||
65 | static void curl_multi_timeout_do(void *arg) | ||
66 | diff --git a/block/iscsi.c b/block/iscsi.c | ||
67 | index XXXXXXX..XXXXXXX 100644 | ||
68 | --- a/block/iscsi.c | ||
69 | +++ b/block/iscsi.c | ||
70 | @@ -XXX,XX +XXX,XX @@ iscsi_process_read(void *arg) | ||
71 | IscsiLun *iscsilun = arg; | ||
72 | struct iscsi_context *iscsi = iscsilun->iscsi; | ||
73 | |||
74 | + aio_context_acquire(iscsilun->aio_context); | ||
75 | iscsi_service(iscsi, POLLIN); | ||
76 | iscsi_set_events(iscsilun); | ||
77 | + aio_context_release(iscsilun->aio_context); | ||
78 | } | ||
79 | |||
80 | static void | ||
81 | @@ -XXX,XX +XXX,XX @@ iscsi_process_write(void *arg) | ||
82 | IscsiLun *iscsilun = arg; | ||
83 | struct iscsi_context *iscsi = iscsilun->iscsi; | ||
84 | |||
85 | + aio_context_acquire(iscsilun->aio_context); | ||
86 | iscsi_service(iscsi, POLLOUT); | ||
87 | iscsi_set_events(iscsilun); | ||
88 | + aio_context_release(iscsilun->aio_context); | ||
89 | } | ||
90 | |||
91 | static int64_t sector_lun2qemu(int64_t sector, IscsiLun *iscsilun) | ||
92 | diff --git a/block/linux-aio.c b/block/linux-aio.c | ||
93 | index XXXXXXX..XXXXXXX 100644 | ||
94 | --- a/block/linux-aio.c | ||
95 | +++ b/block/linux-aio.c | ||
96 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_completion_cb(EventNotifier *e) | ||
97 | LinuxAioState *s = container_of(e, LinuxAioState, e); | ||
98 | |||
99 | if (event_notifier_test_and_clear(&s->e)) { | ||
100 | + aio_context_acquire(s->aio_context); | ||
101 | qemu_laio_process_completions_and_submit(s); | ||
102 | + aio_context_release(s->aio_context); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | @@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque) | ||
107 | return false; | ||
108 | } | ||
109 | |||
110 | + aio_context_acquire(s->aio_context); | ||
111 | qemu_laio_process_completions_and_submit(s); | ||
112 | + aio_context_release(s->aio_context); | ||
113 | return true; | ||
114 | } | ||
115 | |||
116 | diff --git a/block/nfs.c b/block/nfs.c | ||
117 | index XXXXXXX..XXXXXXX 100644 | ||
118 | --- a/block/nfs.c | ||
119 | +++ b/block/nfs.c | ||
120 | @@ -XXX,XX +XXX,XX @@ static void nfs_set_events(NFSClient *client) | ||
121 | static void nfs_process_read(void *arg) | ||
122 | { | ||
123 | NFSClient *client = arg; | ||
124 | + | ||
125 | + aio_context_acquire(client->aio_context); | ||
126 | nfs_service(client->context, POLLIN); | ||
127 | nfs_set_events(client); | ||
128 | + aio_context_release(client->aio_context); | ||
129 | } | ||
130 | |||
131 | static void nfs_process_write(void *arg) | ||
132 | { | ||
133 | NFSClient *client = arg; | ||
134 | + | ||
135 | + aio_context_acquire(client->aio_context); | ||
136 | nfs_service(client->context, POLLOUT); | ||
137 | nfs_set_events(client); | ||
138 | + aio_context_release(client->aio_context); | ||
139 | } | ||
140 | |||
141 | static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | ||
142 | diff --git a/block/sheepdog.c b/block/sheepdog.c | ||
143 | index XXXXXXX..XXXXXXX 100644 | ||
144 | --- a/block/sheepdog.c | ||
145 | +++ b/block/sheepdog.c | ||
146 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data, | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | -static void restart_co_req(void *opaque) | ||
151 | -{ | ||
152 | - Coroutine *co = opaque; | ||
153 | - | ||
154 | - qemu_coroutine_enter(co); | ||
155 | -} | ||
156 | - | ||
157 | typedef struct SheepdogReqCo { | ||
158 | int sockfd; | ||
159 | BlockDriverState *bs; | ||
160 | @@ -XXX,XX +XXX,XX @@ typedef struct SheepdogReqCo { | ||
161 | unsigned int *rlen; | ||
162 | int ret; | ||
163 | bool finished; | ||
164 | + Coroutine *co; | ||
165 | } SheepdogReqCo; | ||
166 | |||
167 | +static void restart_co_req(void *opaque) | ||
168 | +{ | ||
169 | + SheepdogReqCo *srco = opaque; | ||
170 | + | ||
171 | + aio_co_wake(srco->co); | ||
172 | +} | ||
173 | + | ||
174 | static coroutine_fn void do_co_req(void *opaque) | ||
175 | { | ||
176 | int ret; | ||
177 | - Coroutine *co; | ||
178 | SheepdogReqCo *srco = opaque; | ||
179 | int sockfd = srco->sockfd; | ||
180 | SheepdogReq *hdr = srco->hdr; | ||
181 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn void do_co_req(void *opaque) | ||
182 | unsigned int *wlen = srco->wlen; | ||
183 | unsigned int *rlen = srco->rlen; | ||
184 | |||
185 | - co = qemu_coroutine_self(); | ||
186 | + srco->co = qemu_coroutine_self(); | ||
187 | aio_set_fd_handler(srco->aio_context, sockfd, false, | ||
188 | - NULL, restart_co_req, NULL, co); | ||
189 | + NULL, restart_co_req, NULL, srco); | ||
190 | |||
191 | ret = send_co_req(sockfd, hdr, data, wlen); | ||
192 | if (ret < 0) { | ||
193 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn void do_co_req(void *opaque) | ||
194 | } | ||
195 | |||
196 | aio_set_fd_handler(srco->aio_context, sockfd, false, | ||
197 | - restart_co_req, NULL, NULL, co); | ||
198 | + restart_co_req, NULL, NULL, srco); | ||
199 | |||
200 | ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr)); | ||
201 | if (ret != sizeof(*hdr)) { | ||
202 | @@ -XXX,XX +XXX,XX @@ out: | ||
203 | aio_set_fd_handler(srco->aio_context, sockfd, false, | ||
204 | NULL, NULL, NULL, NULL); | ||
205 | |||
206 | + srco->co = NULL; | ||
207 | srco->ret = ret; | ||
208 | srco->finished = true; | ||
209 | if (srco->bs) { | ||
210 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn aio_read_response(void *opaque) | ||
211 | * We've finished all requests which belong to the AIOCB, so | ||
212 | * we can switch back to sd_co_readv/writev now. | ||
213 | */ | ||
214 | - qemu_coroutine_enter(acb->coroutine); | ||
215 | + aio_co_wake(acb->coroutine); | ||
216 | } | ||
217 | |||
218 | return; | ||
219 | @@ -XXX,XX +XXX,XX @@ static void co_read_response(void *opaque) | ||
220 | s->co_recv = qemu_coroutine_create(aio_read_response, opaque); | ||
221 | } | ||
222 | |||
223 | - qemu_coroutine_enter(s->co_recv); | ||
224 | + aio_co_wake(s->co_recv); | ||
225 | } | ||
226 | |||
227 | static void co_write_request(void *opaque) | ||
228 | { | ||
229 | BDRVSheepdogState *s = opaque; | ||
230 | |||
231 | - qemu_coroutine_enter(s->co_send); | ||
232 | + aio_co_wake(s->co_send); | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | diff --git a/block/ssh.c b/block/ssh.c | ||
237 | index XXXXXXX..XXXXXXX 100644 | ||
238 | --- a/block/ssh.c | ||
239 | +++ b/block/ssh.c | ||
240 | @@ -XXX,XX +XXX,XX @@ static void restart_coroutine(void *opaque) | ||
241 | |||
242 | DPRINTF("co=%p", co); | ||
243 | |||
244 | - qemu_coroutine_enter(co); | ||
245 | + aio_co_wake(co); | ||
246 | } | ||
247 | |||
248 | -static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs) | ||
249 | +/* A non-blocking call returned EAGAIN, so yield, ensuring the | ||
250 | + * handlers are set up so that we'll be rescheduled when there is an | ||
251 | + * interesting event on the socket. | ||
252 | + */ | ||
253 | +static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs) | ||
254 | { | ||
255 | int r; | ||
256 | IOHandler *rd_handler = NULL, *wr_handler = NULL; | ||
257 | @@ -XXX,XX +XXX,XX @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs) | ||
258 | |||
259 | aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, | ||
260 | false, rd_handler, wr_handler, NULL, co); | ||
261 | -} | ||
262 | - | ||
263 | -static coroutine_fn void clear_fd_handler(BDRVSSHState *s, | ||
264 | - BlockDriverState *bs) | ||
265 | -{ | ||
266 | - DPRINTF("s->sock=%d", s->sock); | ||
267 | - aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, | ||
268 | - false, NULL, NULL, NULL, NULL); | ||
269 | -} | ||
270 | - | ||
271 | -/* A non-blocking call returned EAGAIN, so yield, ensuring the | ||
272 | - * handlers are set up so that we'll be rescheduled when there is an | ||
273 | - * interesting event on the socket. | ||
274 | - */ | ||
275 | -static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs) | ||
276 | -{ | ||
277 | - set_fd_handler(s, bs); | ||
278 | qemu_coroutine_yield(); | ||
279 | - clear_fd_handler(s, bs); | ||
280 | + DPRINTF("s->sock=%d - back", s->sock); | ||
281 | + aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, false, | ||
282 | + NULL, NULL, NULL, NULL); | ||
283 | } | ||
284 | |||
285 | /* SFTP has a function `libssh2_sftp_seek64' which seeks to a position | ||
286 | diff --git a/block/win32-aio.c b/block/win32-aio.c | ||
287 | index XXXXXXX..XXXXXXX 100644 | ||
288 | --- a/block/win32-aio.c | ||
289 | +++ b/block/win32-aio.c | ||
290 | @@ -XXX,XX +XXX,XX @@ struct QEMUWin32AIOState { | ||
291 | HANDLE hIOCP; | ||
292 | EventNotifier e; | ||
293 | int count; | ||
294 | - bool is_aio_context_attached; | ||
295 | + AioContext *aio_ctx; | ||
296 | }; | ||
297 | |||
298 | typedef struct QEMUWin32AIOCB { | ||
299 | @@ -XXX,XX +XXX,XX @@ static void win32_aio_process_completion(QEMUWin32AIOState *s, | ||
300 | } | ||
301 | |||
302 | |||
303 | + aio_context_acquire(s->aio_ctx); | ||
304 | waiocb->common.cb(waiocb->common.opaque, ret); | ||
305 | + aio_context_release(s->aio_ctx); | ||
306 | qemu_aio_unref(waiocb); | ||
307 | } | ||
308 | |||
309 | @@ -XXX,XX +XXX,XX @@ void win32_aio_detach_aio_context(QEMUWin32AIOState *aio, | ||
310 | AioContext *old_context) | ||
311 | { | ||
312 | aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL); | ||
313 | - aio->is_aio_context_attached = false; | ||
314 | + aio->aio_ctx = NULL; | ||
315 | } | ||
316 | |||
317 | void win32_aio_attach_aio_context(QEMUWin32AIOState *aio, | ||
318 | AioContext *new_context) | ||
319 | { | ||
320 | - aio->is_aio_context_attached = true; | ||
321 | + aio->aio_ctx = new_context; | ||
322 | aio_set_event_notifier(new_context, &aio->e, false, | ||
323 | win32_aio_completion_cb, NULL); | ||
324 | } | ||
325 | @@ -XXX,XX +XXX,XX @@ out_free_state: | ||
326 | |||
327 | void win32_aio_cleanup(QEMUWin32AIOState *aio) | ||
328 | { | ||
329 | - assert(!aio->is_aio_context_attached); | ||
330 | + assert(!aio->aio_ctx); | ||
331 | CloseHandle(aio->hIOCP); | ||
332 | event_notifier_cleanup(&aio->e); | ||
333 | g_free(aio); | ||
334 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | ||
335 | index XXXXXXX..XXXXXXX 100644 | ||
336 | --- a/hw/block/virtio-blk.c | ||
337 | +++ b/hw/block/virtio-blk.c | ||
338 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_ioctl_complete(void *opaque, int status) | ||
339 | { | ||
340 | VirtIOBlockIoctlReq *ioctl_req = opaque; | ||
341 | VirtIOBlockReq *req = ioctl_req->req; | ||
342 | - VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); | ||
343 | + VirtIOBlock *s = req->dev; | ||
344 | + VirtIODevice *vdev = VIRTIO_DEVICE(s); | ||
345 | struct virtio_scsi_inhdr *scsi; | ||
346 | struct sg_io_hdr *hdr; | ||
347 | |||
348 | @@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | ||
349 | MultiReqBuffer mrb = {}; | ||
350 | bool progress = false; | ||
351 | |||
352 | + aio_context_acquire(blk_get_aio_context(s->blk)); | ||
353 | blk_io_plug(s->blk); | ||
354 | |||
355 | do { | ||
356 | @@ -XXX,XX +XXX,XX @@ bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) | ||
357 | } | ||
358 | |||
359 | blk_io_unplug(s->blk); | ||
360 | + aio_context_release(blk_get_aio_context(s->blk)); | ||
361 | return progress; | ||
362 | } | ||
363 | |||
364 | diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c | ||
365 | index XXXXXXX..XXXXXXX 100644 | ||
366 | --- a/hw/scsi/virtio-scsi.c | ||
367 | +++ b/hw/scsi/virtio-scsi.c | ||
368 | @@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq) | ||
369 | VirtIOSCSIReq *req; | ||
370 | bool progress = false; | ||
371 | |||
372 | + virtio_scsi_acquire(s); | ||
373 | while ((req = virtio_scsi_pop_req(s, vq))) { | ||
374 | progress = true; | ||
375 | virtio_scsi_handle_ctrl_req(s, req); | ||
376 | } | ||
377 | + virtio_scsi_release(s); | ||
378 | return progress; | ||
379 | } | ||
380 | |||
381 | @@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) | ||
382 | |||
383 | QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs); | ||
384 | |||
385 | + virtio_scsi_acquire(s); | ||
386 | do { | ||
387 | virtio_queue_set_notification(vq, 0); | ||
388 | |||
389 | @@ -XXX,XX +XXX,XX @@ bool virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq) | ||
390 | QTAILQ_FOREACH_SAFE(req, &reqs, next, next) { | ||
391 | virtio_scsi_handle_cmd_req_submit(s, req); | ||
392 | } | ||
393 | + virtio_scsi_release(s); | ||
394 | return progress; | ||
395 | } | ||
396 | |||
397 | @@ -XXX,XX +XXX,XX @@ out: | ||
398 | |||
399 | bool virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq) | ||
400 | { | ||
401 | + virtio_scsi_acquire(s); | ||
402 | if (s->events_dropped) { | ||
403 | virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); | ||
404 | + virtio_scsi_release(s); | ||
405 | return true; | ||
406 | } | ||
407 | + virtio_scsi_release(s); | ||
408 | return false; | ||
409 | } | ||
410 | |||
411 | diff --git a/util/aio-posix.c b/util/aio-posix.c | ||
412 | index XXXXXXX..XXXXXXX 100644 | ||
413 | --- a/util/aio-posix.c | ||
414 | +++ b/util/aio-posix.c | ||
415 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) | ||
416 | (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) && | ||
417 | aio_node_check(ctx, node->is_external) && | ||
418 | node->io_read) { | ||
419 | - aio_context_acquire(ctx); | ||
420 | node->io_read(node->opaque); | ||
421 | - aio_context_release(ctx); | ||
422 | |||
423 | /* aio_notify() does not count as progress */ | ||
424 | if (node->opaque != &ctx->notifier) { | ||
425 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) | ||
426 | (revents & (G_IO_OUT | G_IO_ERR)) && | ||
427 | aio_node_check(ctx, node->is_external) && | ||
428 | node->io_write) { | ||
429 | - aio_context_acquire(ctx); | ||
430 | node->io_write(node->opaque); | ||
431 | - aio_context_release(ctx); | ||
432 | progress = true; | ||
433 | } | ||
434 | |||
435 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
436 | start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); | ||
437 | } | ||
438 | |||
439 | - aio_context_acquire(ctx); | ||
440 | progress = try_poll_mode(ctx, blocking); | ||
441 | - aio_context_release(ctx); | ||
442 | - | ||
443 | if (!progress) { | ||
444 | assert(npfd == 0); | ||
445 | |||
446 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
447 | index XXXXXXX..XXXXXXX 100644 | ||
448 | --- a/util/aio-win32.c | ||
449 | +++ b/util/aio-win32.c | ||
450 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
451 | (revents || event_notifier_get_handle(node->e) == event) && | ||
452 | node->io_notify) { | ||
453 | node->pfd.revents = 0; | ||
454 | - aio_context_acquire(ctx); | ||
455 | node->io_notify(node->e); | ||
456 | - aio_context_release(ctx); | ||
457 | |||
458 | /* aio_notify() does not count as progress */ | ||
459 | if (node->e != &ctx->notifier) { | ||
460 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
461 | (node->io_read || node->io_write)) { | ||
462 | node->pfd.revents = 0; | ||
463 | if ((revents & G_IO_IN) && node->io_read) { | ||
464 | - aio_context_acquire(ctx); | ||
465 | node->io_read(node->opaque); | ||
466 | - aio_context_release(ctx); | ||
467 | progress = true; | ||
468 | } | ||
469 | if ((revents & G_IO_OUT) && node->io_write) { | ||
470 | - aio_context_acquire(ctx); | ||
471 | node->io_write(node->opaque); | ||
472 | - aio_context_release(ctx); | ||
473 | progress = true; | ||
474 | } | ||
475 | |||
476 | -- | ||
477 | 2.9.3 | ||
478 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
4 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
5 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
6 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
7 | Message-id: 20170213135235.12274-15-pbonzini@redhat.com | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | block/archipelago.c | 3 +++ | ||
11 | block/blkreplay.c | 2 +- | ||
12 | block/block-backend.c | 6 ++++++ | ||
13 | block/curl.c | 26 ++++++++++++++++++-------- | ||
14 | block/gluster.c | 9 +-------- | ||
15 | block/io.c | 6 +++++- | ||
16 | block/iscsi.c | 6 +++++- | ||
17 | block/linux-aio.c | 15 +++++++++------ | ||
18 | block/nfs.c | 3 ++- | ||
19 | block/null.c | 4 ++++ | ||
20 | block/qed.c | 3 +++ | ||
21 | block/rbd.c | 4 ++++ | ||
22 | dma-helpers.c | 2 ++ | ||
23 | hw/block/virtio-blk.c | 2 ++ | ||
24 | hw/scsi/scsi-bus.c | 2 ++ | ||
25 | util/async.c | 4 ++-- | ||
26 | util/thread-pool.c | 2 ++ | ||
27 | 17 files changed, 71 insertions(+), 28 deletions(-) | ||
28 | |||
29 | diff --git a/block/archipelago.c b/block/archipelago.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/block/archipelago.c | ||
32 | +++ b/block/archipelago.c | ||
33 | @@ -XXX,XX +XXX,XX @@ static void qemu_archipelago_complete_aio(void *opaque) | ||
34 | { | ||
35 | AIORequestData *reqdata = (AIORequestData *) opaque; | ||
36 | ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb; | ||
37 | + AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs); | ||
38 | |||
39 | + aio_context_acquire(ctx); | ||
40 | aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret); | ||
41 | + aio_context_release(ctx); | ||
42 | aio_cb->status = 0; | ||
43 | |||
44 | qemu_aio_unref(aio_cb); | ||
45 | diff --git a/block/blkreplay.c b/block/blkreplay.c | ||
46 | index XXXXXXX..XXXXXXX 100755 | ||
47 | --- a/block/blkreplay.c | ||
48 | +++ b/block/blkreplay.c | ||
49 | @@ -XXX,XX +XXX,XX @@ static int64_t blkreplay_getlength(BlockDriverState *bs) | ||
50 | static void blkreplay_bh_cb(void *opaque) | ||
51 | { | ||
52 | Request *req = opaque; | ||
53 | - qemu_coroutine_enter(req->co); | ||
54 | + aio_co_wake(req->co); | ||
55 | qemu_bh_delete(req->bh); | ||
56 | g_free(req); | ||
57 | } | ||
58 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
59 | index XXXXXXX..XXXXXXX 100644 | ||
60 | --- a/block/block-backend.c | ||
61 | +++ b/block/block-backend.c | ||
62 | @@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) | ||
63 | static void error_callback_bh(void *opaque) | ||
64 | { | ||
65 | struct BlockBackendAIOCB *acb = opaque; | ||
66 | + AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
67 | |||
68 | bdrv_dec_in_flight(acb->common.bs); | ||
69 | + aio_context_acquire(ctx); | ||
70 | acb->common.cb(acb->common.opaque, acb->ret); | ||
71 | + aio_context_release(ctx); | ||
72 | qemu_aio_unref(acb); | ||
73 | } | ||
74 | |||
75 | @@ -XXX,XX +XXX,XX @@ static void blk_aio_complete(BlkAioEmAIOCB *acb) | ||
76 | static void blk_aio_complete_bh(void *opaque) | ||
77 | { | ||
78 | BlkAioEmAIOCB *acb = opaque; | ||
79 | + AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
80 | |||
81 | assert(acb->has_returned); | ||
82 | + aio_context_acquire(ctx); | ||
83 | blk_aio_complete(acb); | ||
84 | + aio_context_release(ctx); | ||
85 | } | ||
86 | |||
87 | static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, | ||
88 | diff --git a/block/curl.c b/block/curl.c | ||
89 | index XXXXXXX..XXXXXXX 100644 | ||
90 | --- a/block/curl.c | ||
91 | +++ b/block/curl.c | ||
92 | @@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p) | ||
93 | { | ||
94 | CURLState *state; | ||
95 | int running; | ||
96 | + int ret = -EINPROGRESS; | ||
97 | |||
98 | CURLAIOCB *acb = p; | ||
99 | - BDRVCURLState *s = acb->common.bs->opaque; | ||
100 | + BlockDriverState *bs = acb->common.bs; | ||
101 | + BDRVCURLState *s = bs->opaque; | ||
102 | + AioContext *ctx = bdrv_get_aio_context(bs); | ||
103 | |||
104 | size_t start = acb->sector_num * BDRV_SECTOR_SIZE; | ||
105 | size_t end; | ||
106 | |||
107 | + aio_context_acquire(ctx); | ||
108 | + | ||
109 | // In case we have the requested data already (e.g. read-ahead), | ||
110 | // we can just call the callback and be done. | ||
111 | switch (curl_find_buf(s, start, acb->nb_sectors * BDRV_SECTOR_SIZE, acb)) { | ||
112 | @@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p) | ||
113 | qemu_aio_unref(acb); | ||
114 | // fall through | ||
115 | case FIND_RET_WAIT: | ||
116 | - return; | ||
117 | + goto out; | ||
118 | default: | ||
119 | break; | ||
120 | } | ||
121 | @@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p) | ||
122 | // No cache found, so let's start a new request | ||
123 | state = curl_init_state(acb->common.bs, s); | ||
124 | if (!state) { | ||
125 | - acb->common.cb(acb->common.opaque, -EIO); | ||
126 | - qemu_aio_unref(acb); | ||
127 | - return; | ||
128 | + ret = -EIO; | ||
129 | + goto out; | ||
130 | } | ||
131 | |||
132 | acb->start = 0; | ||
133 | @@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p) | ||
134 | state->orig_buf = g_try_malloc(state->buf_len); | ||
135 | if (state->buf_len && state->orig_buf == NULL) { | ||
136 | curl_clean_state(state); | ||
137 | - acb->common.cb(acb->common.opaque, -ENOMEM); | ||
138 | - qemu_aio_unref(acb); | ||
139 | - return; | ||
140 | + ret = -ENOMEM; | ||
141 | + goto out; | ||
142 | } | ||
143 | state->acb[0] = acb; | ||
144 | |||
145 | @@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p) | ||
146 | |||
147 | /* Tell curl it needs to kick things off */ | ||
148 | curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running); | ||
149 | + | ||
150 | +out: | ||
151 | + if (ret != -EINPROGRESS) { | ||
152 | + acb->common.cb(acb->common.opaque, ret); | ||
153 | + qemu_aio_unref(acb); | ||
154 | + } | ||
155 | + aio_context_release(ctx); | ||
156 | } | ||
157 | |||
158 | static BlockAIOCB *curl_aio_readv(BlockDriverState *bs, | ||
159 | diff --git a/block/gluster.c b/block/gluster.c | ||
160 | index XXXXXXX..XXXXXXX 100644 | ||
161 | --- a/block/gluster.c | ||
162 | +++ b/block/gluster.c | ||
163 | @@ -XXX,XX +XXX,XX @@ static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, | ||
164 | return qemu_gluster_glfs_init(gconf, errp); | ||
165 | } | ||
166 | |||
167 | -static void qemu_gluster_complete_aio(void *opaque) | ||
168 | -{ | ||
169 | - GlusterAIOCB *acb = (GlusterAIOCB *)opaque; | ||
170 | - | ||
171 | - qemu_coroutine_enter(acb->coroutine); | ||
172 | -} | ||
173 | - | ||
174 | /* | ||
175 | * AIO callback routine called from GlusterFS thread. | ||
176 | */ | ||
177 | @@ -XXX,XX +XXX,XX @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) | ||
178 | acb->ret = -EIO; /* Partial read/write - fail it */ | ||
179 | } | ||
180 | |||
181 | - aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb); | ||
182 | + aio_co_schedule(acb->aio_context, acb->coroutine); | ||
183 | } | ||
184 | |||
185 | static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) | ||
186 | diff --git a/block/io.c b/block/io.c | ||
187 | index XXXXXXX..XXXXXXX 100644 | ||
188 | --- a/block/io.c | ||
189 | +++ b/block/io.c | ||
190 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_drain_bh_cb(void *opaque) | ||
191 | bdrv_dec_in_flight(bs); | ||
192 | bdrv_drained_begin(bs); | ||
193 | data->done = true; | ||
194 | - qemu_coroutine_enter(co); | ||
195 | + aio_co_wake(co); | ||
196 | } | ||
197 | |||
198 | static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs) | ||
199 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb) | ||
200 | static void bdrv_co_em_bh(void *opaque) | ||
201 | { | ||
202 | BlockAIOCBCoroutine *acb = opaque; | ||
203 | + BlockDriverState *bs = acb->common.bs; | ||
204 | + AioContext *ctx = bdrv_get_aio_context(bs); | ||
205 | |||
206 | assert(!acb->need_bh); | ||
207 | + aio_context_acquire(ctx); | ||
208 | bdrv_co_complete(acb); | ||
209 | + aio_context_release(ctx); | ||
210 | } | ||
211 | |||
212 | static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) | ||
213 | diff --git a/block/iscsi.c b/block/iscsi.c | ||
214 | index XXXXXXX..XXXXXXX 100644 | ||
215 | --- a/block/iscsi.c | ||
216 | +++ b/block/iscsi.c | ||
217 | @@ -XXX,XX +XXX,XX @@ static void | ||
218 | iscsi_bh_cb(void *p) | ||
219 | { | ||
220 | IscsiAIOCB *acb = p; | ||
221 | + AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
222 | |||
223 | qemu_bh_delete(acb->bh); | ||
224 | |||
225 | g_free(acb->buf); | ||
226 | acb->buf = NULL; | ||
227 | |||
228 | + aio_context_acquire(ctx); | ||
229 | acb->common.cb(acb->common.opaque, acb->status); | ||
230 | + aio_context_release(ctx); | ||
231 | |||
232 | if (acb->task != NULL) { | ||
233 | scsi_free_scsi_task(acb->task); | ||
234 | @@ -XXX,XX +XXX,XX @@ iscsi_schedule_bh(IscsiAIOCB *acb) | ||
235 | static void iscsi_co_generic_bh_cb(void *opaque) | ||
236 | { | ||
237 | struct IscsiTask *iTask = opaque; | ||
238 | + | ||
239 | iTask->complete = 1; | ||
240 | - qemu_coroutine_enter(iTask->co); | ||
241 | + aio_co_wake(iTask->co); | ||
242 | } | ||
243 | |||
244 | static void iscsi_retry_timer_expired(void *opaque) | ||
245 | diff --git a/block/linux-aio.c b/block/linux-aio.c | ||
246 | index XXXXXXX..XXXXXXX 100644 | ||
247 | --- a/block/linux-aio.c | ||
248 | +++ b/block/linux-aio.c | ||
249 | @@ -XXX,XX +XXX,XX @@ struct LinuxAioState { | ||
250 | io_context_t ctx; | ||
251 | EventNotifier e; | ||
252 | |||
253 | - /* io queue for submit at batch */ | ||
254 | + /* io queue for submit at batch. Protected by AioContext lock. */ | ||
255 | LaioQueue io_q; | ||
256 | |||
257 | - /* I/O completion processing */ | ||
258 | + /* I/O completion processing. Only runs in I/O thread. */ | ||
259 | QEMUBH *completion_bh; | ||
260 | int event_idx; | ||
261 | int event_max; | ||
262 | @@ -XXX,XX +XXX,XX @@ static inline ssize_t io_event_ret(struct io_event *ev) | ||
263 | */ | ||
264 | static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) | ||
265 | { | ||
266 | + LinuxAioState *s = laiocb->ctx; | ||
267 | int ret; | ||
268 | |||
269 | ret = laiocb->ret; | ||
270 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) | ||
271 | } | ||
272 | |||
273 | laiocb->ret = ret; | ||
274 | + aio_context_acquire(s->aio_context); | ||
275 | if (laiocb->co) { | ||
276 | /* If the coroutine is already entered it must be in ioq_submit() and | ||
277 | * will notice laio->ret has been filled in when it eventually runs | ||
278 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) | ||
279 | laiocb->common.cb(laiocb->common.opaque, ret); | ||
280 | qemu_aio_unref(laiocb); | ||
281 | } | ||
282 | + aio_context_release(s->aio_context); | ||
283 | } | ||
284 | |||
285 | /** | ||
286 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completions(LinuxAioState *s) | ||
287 | static void qemu_laio_process_completions_and_submit(LinuxAioState *s) | ||
288 | { | ||
289 | qemu_laio_process_completions(s); | ||
290 | + | ||
291 | + aio_context_acquire(s->aio_context); | ||
292 | if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { | ||
293 | ioq_submit(s); | ||
294 | } | ||
295 | + aio_context_release(s->aio_context); | ||
296 | } | ||
297 | |||
298 | static void qemu_laio_completion_bh(void *opaque) | ||
299 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_completion_cb(EventNotifier *e) | ||
300 | LinuxAioState *s = container_of(e, LinuxAioState, e); | ||
301 | |||
302 | if (event_notifier_test_and_clear(&s->e)) { | ||
303 | - aio_context_acquire(s->aio_context); | ||
304 | qemu_laio_process_completions_and_submit(s); | ||
305 | - aio_context_release(s->aio_context); | ||
306 | } | ||
307 | } | ||
308 | |||
309 | @@ -XXX,XX +XXX,XX @@ static bool qemu_laio_poll_cb(void *opaque) | ||
310 | return false; | ||
311 | } | ||
312 | |||
313 | - aio_context_acquire(s->aio_context); | ||
314 | qemu_laio_process_completions_and_submit(s); | ||
315 | - aio_context_release(s->aio_context); | ||
316 | return true; | ||
317 | } | ||
318 | |||
319 | @@ -XXX,XX +XXX,XX @@ void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context) | ||
320 | { | ||
321 | aio_set_event_notifier(old_context, &s->e, false, NULL, NULL); | ||
322 | qemu_bh_delete(s->completion_bh); | ||
323 | + s->aio_context = NULL; | ||
324 | } | ||
325 | |||
326 | void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context) | ||
327 | diff --git a/block/nfs.c b/block/nfs.c | ||
328 | index XXXXXXX..XXXXXXX 100644 | ||
329 | --- a/block/nfs.c | ||
330 | +++ b/block/nfs.c | ||
331 | @@ -XXX,XX +XXX,XX @@ static void nfs_co_init_task(BlockDriverState *bs, NFSRPC *task) | ||
332 | static void nfs_co_generic_bh_cb(void *opaque) | ||
333 | { | ||
334 | NFSRPC *task = opaque; | ||
335 | + | ||
336 | task->complete = 1; | ||
337 | - qemu_coroutine_enter(task->co); | ||
338 | + aio_co_wake(task->co); | ||
339 | } | ||
340 | |||
341 | static void | ||
342 | diff --git a/block/null.c b/block/null.c | ||
343 | index XXXXXXX..XXXXXXX 100644 | ||
344 | --- a/block/null.c | ||
345 | +++ b/block/null.c | ||
346 | @@ -XXX,XX +XXX,XX @@ static const AIOCBInfo null_aiocb_info = { | ||
347 | static void null_bh_cb(void *opaque) | ||
348 | { | ||
349 | NullAIOCB *acb = opaque; | ||
350 | + AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
351 | + | ||
352 | + aio_context_acquire(ctx); | ||
353 | acb->common.cb(acb->common.opaque, 0); | ||
354 | + aio_context_release(ctx); | ||
355 | qemu_aio_unref(acb); | ||
356 | } | ||
357 | |||
358 | diff --git a/block/qed.c b/block/qed.c | ||
359 | index XXXXXXX..XXXXXXX 100644 | ||
360 | --- a/block/qed.c | ||
361 | +++ b/block/qed.c | ||
362 | @@ -XXX,XX +XXX,XX @@ static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index, | ||
363 | static void qed_aio_complete_bh(void *opaque) | ||
364 | { | ||
365 | QEDAIOCB *acb = opaque; | ||
366 | + BDRVQEDState *s = acb_to_s(acb); | ||
367 | BlockCompletionFunc *cb = acb->common.cb; | ||
368 | void *user_opaque = acb->common.opaque; | ||
369 | int ret = acb->bh_ret; | ||
370 | @@ -XXX,XX +XXX,XX @@ static void qed_aio_complete_bh(void *opaque) | ||
371 | qemu_aio_unref(acb); | ||
372 | |||
373 | /* Invoke callback */ | ||
374 | + qed_acquire(s); | ||
375 | cb(user_opaque, ret); | ||
376 | + qed_release(s); | ||
377 | } | ||
378 | |||
379 | static void qed_aio_complete(QEDAIOCB *acb, int ret) | ||
380 | diff --git a/block/rbd.c b/block/rbd.c | ||
381 | index XXXXXXX..XXXXXXX 100644 | ||
382 | --- a/block/rbd.c | ||
383 | +++ b/block/rbd.c | ||
384 | @@ -XXX,XX +XXX,XX @@ shutdown: | ||
385 | static void qemu_rbd_complete_aio(RADOSCB *rcb) | ||
386 | { | ||
387 | RBDAIOCB *acb = rcb->acb; | ||
388 | + AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
389 | int64_t r; | ||
390 | |||
391 | r = rcb->ret; | ||
392 | @@ -XXX,XX +XXX,XX @@ static void qemu_rbd_complete_aio(RADOSCB *rcb) | ||
393 | qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); | ||
394 | } | ||
395 | qemu_vfree(acb->bounce); | ||
396 | + | ||
397 | + aio_context_acquire(ctx); | ||
398 | acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret)); | ||
399 | + aio_context_release(ctx); | ||
400 | |||
401 | qemu_aio_unref(acb); | ||
402 | } | ||
403 | diff --git a/dma-helpers.c b/dma-helpers.c | ||
404 | index XXXXXXX..XXXXXXX 100644 | ||
405 | --- a/dma-helpers.c | ||
406 | +++ b/dma-helpers.c | ||
407 | @@ -XXX,XX +XXX,XX @@ static void dma_blk_cb(void *opaque, int ret) | ||
408 | QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align)); | ||
409 | } | ||
410 | |||
411 | + aio_context_acquire(dbs->ctx); | ||
412 | dbs->acb = dbs->io_func(dbs->offset, &dbs->iov, | ||
413 | dma_blk_cb, dbs, dbs->io_func_opaque); | ||
414 | + aio_context_release(dbs->ctx); | ||
415 | assert(dbs->acb); | ||
416 | } | ||
417 | |||
418 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | ||
419 | index XXXXXXX..XXXXXXX 100644 | ||
420 | --- a/hw/block/virtio-blk.c | ||
421 | +++ b/hw/block/virtio-blk.c | ||
422 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque) | ||
423 | |||
424 | s->rq = NULL; | ||
425 | |||
426 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
427 | while (req) { | ||
428 | VirtIOBlockReq *next = req->next; | ||
429 | if (virtio_blk_handle_request(req, &mrb)) { | ||
430 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_dma_restart_bh(void *opaque) | ||
431 | if (mrb.num_reqs) { | ||
432 | virtio_blk_submit_multireq(s->blk, &mrb); | ||
433 | } | ||
434 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
435 | } | ||
436 | |||
437 | static void virtio_blk_dma_restart_cb(void *opaque, int running, | ||
438 | diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c | ||
439 | index XXXXXXX..XXXXXXX 100644 | ||
440 | --- a/hw/scsi/scsi-bus.c | ||
441 | +++ b/hw/scsi/scsi-bus.c | ||
442 | @@ -XXX,XX +XXX,XX @@ static void scsi_dma_restart_bh(void *opaque) | ||
443 | qemu_bh_delete(s->bh); | ||
444 | s->bh = NULL; | ||
445 | |||
446 | + aio_context_acquire(blk_get_aio_context(s->conf.blk)); | ||
447 | QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { | ||
448 | scsi_req_ref(req); | ||
449 | if (req->retry) { | ||
450 | @@ -XXX,XX +XXX,XX @@ static void scsi_dma_restart_bh(void *opaque) | ||
451 | } | ||
452 | scsi_req_unref(req); | ||
453 | } | ||
454 | + aio_context_release(blk_get_aio_context(s->conf.blk)); | ||
455 | } | ||
456 | |||
457 | void scsi_req_retry(SCSIRequest *req) | ||
458 | diff --git a/util/async.c b/util/async.c | ||
459 | index XXXXXXX..XXXXXXX 100644 | ||
460 | --- a/util/async.c | ||
461 | +++ b/util/async.c | ||
462 | @@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx) | ||
463 | ret = 1; | ||
464 | } | ||
465 | bh->idle = 0; | ||
466 | - aio_context_acquire(ctx); | ||
467 | aio_bh_call(bh); | ||
468 | - aio_context_release(ctx); | ||
469 | } | ||
470 | if (bh->deleted) { | ||
471 | deleted = true; | ||
472 | @@ -XXX,XX +XXX,XX @@ static void co_schedule_bh_cb(void *opaque) | ||
473 | Coroutine *co = QSLIST_FIRST(&straight); | ||
474 | QSLIST_REMOVE_HEAD(&straight, co_scheduled_next); | ||
475 | trace_aio_co_schedule_bh_cb(ctx, co); | ||
476 | + aio_context_acquire(ctx); | ||
477 | qemu_coroutine_enter(co); | ||
478 | + aio_context_release(ctx); | ||
479 | } | ||
480 | } | ||
481 | |||
482 | diff --git a/util/thread-pool.c b/util/thread-pool.c | ||
483 | index XXXXXXX..XXXXXXX 100644 | ||
484 | --- a/util/thread-pool.c | ||
485 | +++ b/util/thread-pool.c | ||
486 | @@ -XXX,XX +XXX,XX @@ static void thread_pool_completion_bh(void *opaque) | ||
487 | ThreadPool *pool = opaque; | ||
488 | ThreadPoolElement *elem, *next; | ||
489 | |||
490 | + aio_context_acquire(pool->ctx); | ||
491 | restart: | ||
492 | QLIST_FOREACH_SAFE(elem, &pool->head, all, next) { | ||
493 | if (elem->state != THREAD_DONE) { | ||
494 | @@ -XXX,XX +XXX,XX @@ restart: | ||
495 | qemu_aio_unref(elem); | ||
496 | } | ||
497 | } | ||
498 | + aio_context_release(pool->ctx); | ||
499 | } | ||
500 | |||
501 | static void thread_pool_cancel(BlockAIOCB *acb) | ||
502 | -- | ||
503 | 2.9.3 | ||
504 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
4 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
5 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
6 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
7 | Message-id: 20170213135235.12274-16-pbonzini@redhat.com | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | block/archipelago.c | 3 --- | ||
11 | block/block-backend.c | 7 ------- | ||
12 | block/curl.c | 2 +- | ||
13 | block/io.c | 6 +----- | ||
14 | block/iscsi.c | 3 --- | ||
15 | block/linux-aio.c | 5 +---- | ||
16 | block/mirror.c | 12 +++++++++--- | ||
17 | block/null.c | 8 -------- | ||
18 | block/qed-cluster.c | 2 ++ | ||
19 | block/qed-table.c | 12 ++++++++++-- | ||
20 | block/qed.c | 4 ++-- | ||
21 | block/rbd.c | 4 ---- | ||
22 | block/win32-aio.c | 3 --- | ||
23 | hw/block/virtio-blk.c | 12 +++++++++++- | ||
24 | hw/scsi/scsi-disk.c | 15 +++++++++++++++ | ||
25 | hw/scsi/scsi-generic.c | 20 +++++++++++++++++--- | ||
26 | util/thread-pool.c | 4 +++- | ||
27 | 17 files changed, 72 insertions(+), 50 deletions(-) | ||
28 | |||
29 | diff --git a/block/archipelago.c b/block/archipelago.c | ||
30 | index XXXXXXX..XXXXXXX 100644 | ||
31 | --- a/block/archipelago.c | ||
32 | +++ b/block/archipelago.c | ||
33 | @@ -XXX,XX +XXX,XX @@ static void qemu_archipelago_complete_aio(void *opaque) | ||
34 | { | ||
35 | AIORequestData *reqdata = (AIORequestData *) opaque; | ||
36 | ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) reqdata->aio_cb; | ||
37 | - AioContext *ctx = bdrv_get_aio_context(aio_cb->common.bs); | ||
38 | |||
39 | - aio_context_acquire(ctx); | ||
40 | aio_cb->common.cb(aio_cb->common.opaque, aio_cb->ret); | ||
41 | - aio_context_release(ctx); | ||
42 | aio_cb->status = 0; | ||
43 | |||
44 | qemu_aio_unref(aio_cb); | ||
45 | diff --git a/block/block-backend.c b/block/block-backend.c | ||
46 | index XXXXXXX..XXXXXXX 100644 | ||
47 | --- a/block/block-backend.c | ||
48 | +++ b/block/block-backend.c | ||
49 | @@ -XXX,XX +XXX,XX @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) | ||
50 | static void error_callback_bh(void *opaque) | ||
51 | { | ||
52 | struct BlockBackendAIOCB *acb = opaque; | ||
53 | - AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
54 | |||
55 | bdrv_dec_in_flight(acb->common.bs); | ||
56 | - aio_context_acquire(ctx); | ||
57 | acb->common.cb(acb->common.opaque, acb->ret); | ||
58 | - aio_context_release(ctx); | ||
59 | qemu_aio_unref(acb); | ||
60 | } | ||
61 | |||
62 | @@ -XXX,XX +XXX,XX @@ static void blk_aio_complete(BlkAioEmAIOCB *acb) | ||
63 | static void blk_aio_complete_bh(void *opaque) | ||
64 | { | ||
65 | BlkAioEmAIOCB *acb = opaque; | ||
66 | - AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
67 | - | ||
68 | assert(acb->has_returned); | ||
69 | - aio_context_acquire(ctx); | ||
70 | blk_aio_complete(acb); | ||
71 | - aio_context_release(ctx); | ||
72 | } | ||
73 | |||
74 | static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, | ||
75 | diff --git a/block/curl.c b/block/curl.c | ||
76 | index XXXXXXX..XXXXXXX 100644 | ||
77 | --- a/block/curl.c | ||
78 | +++ b/block/curl.c | ||
79 | @@ -XXX,XX +XXX,XX @@ static void curl_readv_bh_cb(void *p) | ||
80 | curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running); | ||
81 | |||
82 | out: | ||
83 | + aio_context_release(ctx); | ||
84 | if (ret != -EINPROGRESS) { | ||
85 | acb->common.cb(acb->common.opaque, ret); | ||
86 | qemu_aio_unref(acb); | ||
87 | } | ||
88 | - aio_context_release(ctx); | ||
89 | } | ||
90 | |||
91 | static BlockAIOCB *curl_aio_readv(BlockDriverState *bs, | ||
92 | diff --git a/block/io.c b/block/io.c | ||
93 | index XXXXXXX..XXXXXXX 100644 | ||
94 | --- a/block/io.c | ||
95 | +++ b/block/io.c | ||
96 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_io_em_complete(void *opaque, int ret) | ||
97 | CoroutineIOCompletion *co = opaque; | ||
98 | |||
99 | co->ret = ret; | ||
100 | - qemu_coroutine_enter(co->coroutine); | ||
101 | + aio_co_wake(co->coroutine); | ||
102 | } | ||
103 | |||
104 | static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs, | ||
105 | @@ -XXX,XX +XXX,XX @@ static void bdrv_co_complete(BlockAIOCBCoroutine *acb) | ||
106 | static void bdrv_co_em_bh(void *opaque) | ||
107 | { | ||
108 | BlockAIOCBCoroutine *acb = opaque; | ||
109 | - BlockDriverState *bs = acb->common.bs; | ||
110 | - AioContext *ctx = bdrv_get_aio_context(bs); | ||
111 | |||
112 | assert(!acb->need_bh); | ||
113 | - aio_context_acquire(ctx); | ||
114 | bdrv_co_complete(acb); | ||
115 | - aio_context_release(ctx); | ||
116 | } | ||
117 | |||
118 | static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb) | ||
119 | diff --git a/block/iscsi.c b/block/iscsi.c | ||
120 | index XXXXXXX..XXXXXXX 100644 | ||
121 | --- a/block/iscsi.c | ||
122 | +++ b/block/iscsi.c | ||
123 | @@ -XXX,XX +XXX,XX @@ static void | ||
124 | iscsi_bh_cb(void *p) | ||
125 | { | ||
126 | IscsiAIOCB *acb = p; | ||
127 | - AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
128 | |||
129 | qemu_bh_delete(acb->bh); | ||
130 | |||
131 | g_free(acb->buf); | ||
132 | acb->buf = NULL; | ||
133 | |||
134 | - aio_context_acquire(ctx); | ||
135 | acb->common.cb(acb->common.opaque, acb->status); | ||
136 | - aio_context_release(ctx); | ||
137 | |||
138 | if (acb->task != NULL) { | ||
139 | scsi_free_scsi_task(acb->task); | ||
140 | diff --git a/block/linux-aio.c b/block/linux-aio.c | ||
141 | index XXXXXXX..XXXXXXX 100644 | ||
142 | --- a/block/linux-aio.c | ||
143 | +++ b/block/linux-aio.c | ||
144 | @@ -XXX,XX +XXX,XX @@ static inline ssize_t io_event_ret(struct io_event *ev) | ||
145 | */ | ||
146 | static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) | ||
147 | { | ||
148 | - LinuxAioState *s = laiocb->ctx; | ||
149 | int ret; | ||
150 | |||
151 | ret = laiocb->ret; | ||
152 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) | ||
153 | } | ||
154 | |||
155 | laiocb->ret = ret; | ||
156 | - aio_context_acquire(s->aio_context); | ||
157 | if (laiocb->co) { | ||
158 | /* If the coroutine is already entered it must be in ioq_submit() and | ||
159 | * will notice laio->ret has been filled in when it eventually runs | ||
160 | @@ -XXX,XX +XXX,XX @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) | ||
161 | * that! | ||
162 | */ | ||
163 | if (!qemu_coroutine_entered(laiocb->co)) { | ||
164 | - qemu_coroutine_enter(laiocb->co); | ||
165 | + aio_co_wake(laiocb->co); | ||
166 | } | ||
167 | } else { | ||
168 | laiocb->common.cb(laiocb->common.opaque, ret); | ||
169 | qemu_aio_unref(laiocb); | ||
170 | } | ||
171 | - aio_context_release(s->aio_context); | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | diff --git a/block/mirror.c b/block/mirror.c | ||
176 | index XXXXXXX..XXXXXXX 100644 | ||
177 | --- a/block/mirror.c | ||
178 | +++ b/block/mirror.c | ||
179 | @@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret) | ||
180 | { | ||
181 | MirrorOp *op = opaque; | ||
182 | MirrorBlockJob *s = op->s; | ||
183 | + | ||
184 | + aio_context_acquire(blk_get_aio_context(s->common.blk)); | ||
185 | if (ret < 0) { | ||
186 | BlockErrorAction action; | ||
187 | |||
188 | @@ -XXX,XX +XXX,XX @@ static void mirror_write_complete(void *opaque, int ret) | ||
189 | } | ||
190 | } | ||
191 | mirror_iteration_done(op, ret); | ||
192 | + aio_context_release(blk_get_aio_context(s->common.blk)); | ||
193 | } | ||
194 | |||
195 | static void mirror_read_complete(void *opaque, int ret) | ||
196 | { | ||
197 | MirrorOp *op = opaque; | ||
198 | MirrorBlockJob *s = op->s; | ||
199 | + | ||
200 | + aio_context_acquire(blk_get_aio_context(s->common.blk)); | ||
201 | if (ret < 0) { | ||
202 | BlockErrorAction action; | ||
203 | |||
204 | @@ -XXX,XX +XXX,XX @@ static void mirror_read_complete(void *opaque, int ret) | ||
205 | } | ||
206 | |||
207 | mirror_iteration_done(op, ret); | ||
208 | - return; | ||
209 | + } else { | ||
210 | + blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov, | ||
211 | + 0, mirror_write_complete, op); | ||
212 | } | ||
213 | - blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov, | ||
214 | - 0, mirror_write_complete, op); | ||
215 | + aio_context_release(blk_get_aio_context(s->common.blk)); | ||
216 | } | ||
217 | |||
218 | static inline void mirror_clip_sectors(MirrorBlockJob *s, | ||
219 | diff --git a/block/null.c b/block/null.c | ||
220 | index XXXXXXX..XXXXXXX 100644 | ||
221 | --- a/block/null.c | ||
222 | +++ b/block/null.c | ||
223 | @@ -XXX,XX +XXX,XX @@ static const AIOCBInfo null_aiocb_info = { | ||
224 | static void null_bh_cb(void *opaque) | ||
225 | { | ||
226 | NullAIOCB *acb = opaque; | ||
227 | - AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
228 | - | ||
229 | - aio_context_acquire(ctx); | ||
230 | acb->common.cb(acb->common.opaque, 0); | ||
231 | - aio_context_release(ctx); | ||
232 | qemu_aio_unref(acb); | ||
233 | } | ||
234 | |||
235 | static void null_timer_cb(void *opaque) | ||
236 | { | ||
237 | NullAIOCB *acb = opaque; | ||
238 | - AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
239 | - | ||
240 | - aio_context_acquire(ctx); | ||
241 | acb->common.cb(acb->common.opaque, 0); | ||
242 | - aio_context_release(ctx); | ||
243 | timer_deinit(&acb->timer); | ||
244 | qemu_aio_unref(acb); | ||
245 | } | ||
246 | diff --git a/block/qed-cluster.c b/block/qed-cluster.c | ||
247 | index XXXXXXX..XXXXXXX 100644 | ||
248 | --- a/block/qed-cluster.c | ||
249 | +++ b/block/qed-cluster.c | ||
250 | @@ -XXX,XX +XXX,XX @@ static void qed_find_cluster_cb(void *opaque, int ret) | ||
251 | unsigned int index; | ||
252 | unsigned int n; | ||
253 | |||
254 | + qed_acquire(s); | ||
255 | if (ret) { | ||
256 | goto out; | ||
257 | } | ||
258 | @@ -XXX,XX +XXX,XX @@ static void qed_find_cluster_cb(void *opaque, int ret) | ||
259 | |||
260 | out: | ||
261 | find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len); | ||
262 | + qed_release(s); | ||
263 | g_free(find_cluster_cb); | ||
264 | } | ||
265 | |||
266 | diff --git a/block/qed-table.c b/block/qed-table.c | ||
267 | index XXXXXXX..XXXXXXX 100644 | ||
268 | --- a/block/qed-table.c | ||
269 | +++ b/block/qed-table.c | ||
270 | @@ -XXX,XX +XXX,XX @@ static void qed_read_table_cb(void *opaque, int ret) | ||
271 | { | ||
272 | QEDReadTableCB *read_table_cb = opaque; | ||
273 | QEDTable *table = read_table_cb->table; | ||
274 | + BDRVQEDState *s = read_table_cb->s; | ||
275 | int noffsets = read_table_cb->qiov.size / sizeof(uint64_t); | ||
276 | int i; | ||
277 | |||
278 | @@ -XXX,XX +XXX,XX @@ static void qed_read_table_cb(void *opaque, int ret) | ||
279 | } | ||
280 | |||
281 | /* Byteswap offsets */ | ||
282 | + qed_acquire(s); | ||
283 | for (i = 0; i < noffsets; i++) { | ||
284 | table->offsets[i] = le64_to_cpu(table->offsets[i]); | ||
285 | } | ||
286 | + qed_release(s); | ||
287 | |||
288 | out: | ||
289 | /* Completion */ | ||
290 | - trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret); | ||
291 | + trace_qed_read_table_cb(s, read_table_cb->table, ret); | ||
292 | gencb_complete(&read_table_cb->gencb, ret); | ||
293 | } | ||
294 | |||
295 | @@ -XXX,XX +XXX,XX @@ typedef struct { | ||
296 | static void qed_write_table_cb(void *opaque, int ret) | ||
297 | { | ||
298 | QEDWriteTableCB *write_table_cb = opaque; | ||
299 | + BDRVQEDState *s = write_table_cb->s; | ||
300 | |||
301 | - trace_qed_write_table_cb(write_table_cb->s, | ||
302 | + trace_qed_write_table_cb(s, | ||
303 | write_table_cb->orig_table, | ||
304 | write_table_cb->flush, | ||
305 | ret); | ||
306 | @@ -XXX,XX +XXX,XX @@ static void qed_write_table_cb(void *opaque, int ret) | ||
307 | if (write_table_cb->flush) { | ||
308 | /* We still need to flush first */ | ||
309 | write_table_cb->flush = false; | ||
310 | + qed_acquire(s); | ||
311 | bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb, | ||
312 | write_table_cb); | ||
313 | + qed_release(s); | ||
314 | return; | ||
315 | } | ||
316 | |||
317 | @@ -XXX,XX +XXX,XX @@ static void qed_read_l2_table_cb(void *opaque, int ret) | ||
318 | CachedL2Table *l2_table = request->l2_table; | ||
319 | uint64_t l2_offset = read_l2_table_cb->l2_offset; | ||
320 | |||
321 | + qed_acquire(s); | ||
322 | if (ret) { | ||
323 | /* can't trust loaded L2 table anymore */ | ||
324 | qed_unref_l2_cache_entry(l2_table); | ||
325 | @@ -XXX,XX +XXX,XX @@ static void qed_read_l2_table_cb(void *opaque, int ret) | ||
326 | request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset); | ||
327 | assert(request->l2_table != NULL); | ||
328 | } | ||
329 | + qed_release(s); | ||
330 | |||
331 | gencb_complete(&read_l2_table_cb->gencb, ret); | ||
332 | } | ||
333 | diff --git a/block/qed.c b/block/qed.c | ||
334 | index XXXXXXX..XXXXXXX 100644 | ||
335 | --- a/block/qed.c | ||
336 | +++ b/block/qed.c | ||
337 | @@ -XXX,XX +XXX,XX @@ static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t l | ||
338 | } | ||
339 | |||
340 | if (cb->co) { | ||
341 | - qemu_coroutine_enter(cb->co); | ||
342 | + aio_co_wake(cb->co); | ||
343 | } | ||
344 | } | ||
345 | |||
346 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn qed_co_pwrite_zeroes_cb(void *opaque, int ret) | ||
347 | cb->done = true; | ||
348 | cb->ret = ret; | ||
349 | if (cb->co) { | ||
350 | - qemu_coroutine_enter(cb->co); | ||
351 | + aio_co_wake(cb->co); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | diff --git a/block/rbd.c b/block/rbd.c | ||
356 | index XXXXXXX..XXXXXXX 100644 | ||
357 | --- a/block/rbd.c | ||
358 | +++ b/block/rbd.c | ||
359 | @@ -XXX,XX +XXX,XX @@ shutdown: | ||
360 | static void qemu_rbd_complete_aio(RADOSCB *rcb) | ||
361 | { | ||
362 | RBDAIOCB *acb = rcb->acb; | ||
363 | - AioContext *ctx = bdrv_get_aio_context(acb->common.bs); | ||
364 | int64_t r; | ||
365 | |||
366 | r = rcb->ret; | ||
367 | @@ -XXX,XX +XXX,XX @@ static void qemu_rbd_complete_aio(RADOSCB *rcb) | ||
368 | qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); | ||
369 | } | ||
370 | qemu_vfree(acb->bounce); | ||
371 | - | ||
372 | - aio_context_acquire(ctx); | ||
373 | acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret)); | ||
374 | - aio_context_release(ctx); | ||
375 | |||
376 | qemu_aio_unref(acb); | ||
377 | } | ||
378 | diff --git a/block/win32-aio.c b/block/win32-aio.c | ||
379 | index XXXXXXX..XXXXXXX 100644 | ||
380 | --- a/block/win32-aio.c | ||
381 | +++ b/block/win32-aio.c | ||
382 | @@ -XXX,XX +XXX,XX @@ static void win32_aio_process_completion(QEMUWin32AIOState *s, | ||
383 | qemu_vfree(waiocb->buf); | ||
384 | } | ||
385 | |||
386 | - | ||
387 | - aio_context_acquire(s->aio_ctx); | ||
388 | waiocb->common.cb(waiocb->common.opaque, ret); | ||
389 | - aio_context_release(s->aio_ctx); | ||
390 | qemu_aio_unref(waiocb); | ||
391 | } | ||
392 | |||
393 | diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c | ||
394 | index XXXXXXX..XXXXXXX 100644 | ||
395 | --- a/hw/block/virtio-blk.c | ||
396 | +++ b/hw/block/virtio-blk.c | ||
397 | @@ -XXX,XX +XXX,XX @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, | ||
398 | static void virtio_blk_rw_complete(void *opaque, int ret) | ||
399 | { | ||
400 | VirtIOBlockReq *next = opaque; | ||
401 | + VirtIOBlock *s = next->dev; | ||
402 | |||
403 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
404 | while (next) { | ||
405 | VirtIOBlockReq *req = next; | ||
406 | next = req->mr_next; | ||
407 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_rw_complete(void *opaque, int ret) | ||
408 | block_acct_done(blk_get_stats(req->dev->blk), &req->acct); | ||
409 | virtio_blk_free_request(req); | ||
410 | } | ||
411 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
412 | } | ||
413 | |||
414 | static void virtio_blk_flush_complete(void *opaque, int ret) | ||
415 | { | ||
416 | VirtIOBlockReq *req = opaque; | ||
417 | + VirtIOBlock *s = req->dev; | ||
418 | |||
419 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
420 | if (ret) { | ||
421 | if (virtio_blk_handle_rw_error(req, -ret, 0)) { | ||
422 | - return; | ||
423 | + goto out; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); | ||
428 | block_acct_done(blk_get_stats(req->dev->blk), &req->acct); | ||
429 | virtio_blk_free_request(req); | ||
430 | + | ||
431 | +out: | ||
432 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
433 | } | ||
434 | |||
435 | #ifdef __linux__ | ||
436 | @@ -XXX,XX +XXX,XX @@ static void virtio_blk_ioctl_complete(void *opaque, int status) | ||
437 | virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len); | ||
438 | |||
439 | out: | ||
440 | + aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); | ||
441 | virtio_blk_req_complete(req, status); | ||
442 | virtio_blk_free_request(req); | ||
443 | + aio_context_release(blk_get_aio_context(s->conf.conf.blk)); | ||
444 | g_free(ioctl_req); | ||
445 | } | ||
446 | |||
447 | diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c | ||
448 | index XXXXXXX..XXXXXXX 100644 | ||
449 | --- a/hw/scsi/scsi-disk.c | ||
450 | +++ b/hw/scsi/scsi-disk.c | ||
451 | @@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret) | ||
452 | |||
453 | assert(r->req.aiocb != NULL); | ||
454 | r->req.aiocb = NULL; | ||
455 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
456 | if (scsi_disk_req_check_error(r, ret, true)) { | ||
457 | goto done; | ||
458 | } | ||
459 | @@ -XXX,XX +XXX,XX @@ static void scsi_aio_complete(void *opaque, int ret) | ||
460 | scsi_req_complete(&r->req, GOOD); | ||
461 | |||
462 | done: | ||
463 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
464 | scsi_req_unref(&r->req); | ||
465 | } | ||
466 | |||
467 | @@ -XXX,XX +XXX,XX @@ static void scsi_dma_complete(void *opaque, int ret) | ||
468 | assert(r->req.aiocb != NULL); | ||
469 | r->req.aiocb = NULL; | ||
470 | |||
471 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
472 | if (ret < 0) { | ||
473 | block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); | ||
474 | } else { | ||
475 | block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); | ||
476 | } | ||
477 | scsi_dma_complete_noio(r, ret); | ||
478 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
479 | } | ||
480 | |||
481 | static void scsi_read_complete(void * opaque, int ret) | ||
482 | @@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret) | ||
483 | |||
484 | assert(r->req.aiocb != NULL); | ||
485 | r->req.aiocb = NULL; | ||
486 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
487 | if (scsi_disk_req_check_error(r, ret, true)) { | ||
488 | goto done; | ||
489 | } | ||
490 | @@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret) | ||
491 | |||
492 | done: | ||
493 | scsi_req_unref(&r->req); | ||
494 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
495 | } | ||
496 | |||
497 | /* Actually issue a read to the block device. */ | ||
498 | @@ -XXX,XX +XXX,XX @@ static void scsi_do_read_cb(void *opaque, int ret) | ||
499 | assert (r->req.aiocb != NULL); | ||
500 | r->req.aiocb = NULL; | ||
501 | |||
502 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
503 | if (ret < 0) { | ||
504 | block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); | ||
505 | } else { | ||
506 | block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); | ||
507 | } | ||
508 | scsi_do_read(opaque, ret); | ||
509 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
510 | } | ||
511 | |||
512 | /* Read more data from scsi device into buffer. */ | ||
513 | @@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret) | ||
514 | assert (r->req.aiocb != NULL); | ||
515 | r->req.aiocb = NULL; | ||
516 | |||
517 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
518 | if (ret < 0) { | ||
519 | block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); | ||
520 | } else { | ||
521 | block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); | ||
522 | } | ||
523 | scsi_write_complete_noio(r, ret); | ||
524 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
525 | } | ||
526 | |||
527 | static void scsi_write_data(SCSIRequest *req) | ||
528 | @@ -XXX,XX +XXX,XX @@ static void scsi_unmap_complete(void *opaque, int ret) | ||
529 | { | ||
530 | UnmapCBData *data = opaque; | ||
531 | SCSIDiskReq *r = data->r; | ||
532 | + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); | ||
533 | |||
534 | assert(r->req.aiocb != NULL); | ||
535 | r->req.aiocb = NULL; | ||
536 | |||
537 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
538 | scsi_unmap_complete_noio(data, ret); | ||
539 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
540 | } | ||
541 | |||
542 | static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) | ||
543 | @@ -XXX,XX +XXX,XX @@ static void scsi_write_same_complete(void *opaque, int ret) | ||
544 | |||
545 | assert(r->req.aiocb != NULL); | ||
546 | r->req.aiocb = NULL; | ||
547 | + aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); | ||
548 | if (scsi_disk_req_check_error(r, ret, true)) { | ||
549 | goto done; | ||
550 | } | ||
551 | @@ -XXX,XX +XXX,XX @@ done: | ||
552 | scsi_req_unref(&r->req); | ||
553 | qemu_vfree(data->iov.iov_base); | ||
554 | g_free(data); | ||
555 | + aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); | ||
556 | } | ||
557 | |||
558 | static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) | ||
559 | diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c | ||
560 | index XXXXXXX..XXXXXXX 100644 | ||
561 | --- a/hw/scsi/scsi-generic.c | ||
562 | +++ b/hw/scsi/scsi-generic.c | ||
563 | @@ -XXX,XX +XXX,XX @@ done: | ||
564 | static void scsi_command_complete(void *opaque, int ret) | ||
565 | { | ||
566 | SCSIGenericReq *r = (SCSIGenericReq *)opaque; | ||
567 | + SCSIDevice *s = r->req.dev; | ||
568 | |||
569 | assert(r->req.aiocb != NULL); | ||
570 | r->req.aiocb = NULL; | ||
571 | + | ||
572 | + aio_context_acquire(blk_get_aio_context(s->conf.blk)); | ||
573 | scsi_command_complete_noio(r, ret); | ||
574 | + aio_context_release(blk_get_aio_context(s->conf.blk)); | ||
575 | } | ||
576 | |||
577 | static int execute_command(BlockBackend *blk, | ||
578 | @@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret) | ||
579 | assert(r->req.aiocb != NULL); | ||
580 | r->req.aiocb = NULL; | ||
581 | |||
582 | + aio_context_acquire(blk_get_aio_context(s->conf.blk)); | ||
583 | + | ||
584 | if (ret || r->req.io_canceled) { | ||
585 | scsi_command_complete_noio(r, ret); | ||
586 | - return; | ||
587 | + goto done; | ||
588 | } | ||
589 | |||
590 | len = r->io_header.dxfer_len - r->io_header.resid; | ||
591 | @@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret) | ||
592 | r->len = -1; | ||
593 | if (len == 0) { | ||
594 | scsi_command_complete_noio(r, 0); | ||
595 | - return; | ||
596 | + goto done; | ||
597 | } | ||
598 | |||
599 | /* Snoop READ CAPACITY output to set the blocksize. */ | ||
600 | @@ -XXX,XX +XXX,XX @@ static void scsi_read_complete(void * opaque, int ret) | ||
601 | } | ||
602 | scsi_req_data(&r->req, len); | ||
603 | scsi_req_unref(&r->req); | ||
604 | + | ||
605 | +done: | ||
606 | + aio_context_release(blk_get_aio_context(s->conf.blk)); | ||
607 | } | ||
608 | |||
609 | /* Read more data from scsi device into buffer. */ | ||
610 | @@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret) | ||
611 | assert(r->req.aiocb != NULL); | ||
612 | r->req.aiocb = NULL; | ||
613 | |||
614 | + aio_context_acquire(blk_get_aio_context(s->conf.blk)); | ||
615 | + | ||
616 | if (ret || r->req.io_canceled) { | ||
617 | scsi_command_complete_noio(r, ret); | ||
618 | - return; | ||
619 | + goto done; | ||
620 | } | ||
621 | |||
622 | if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 && | ||
623 | @@ -XXX,XX +XXX,XX @@ static void scsi_write_complete(void * opaque, int ret) | ||
624 | } | ||
625 | |||
626 | scsi_command_complete_noio(r, ret); | ||
627 | + | ||
628 | +done: | ||
629 | + aio_context_release(blk_get_aio_context(s->conf.blk)); | ||
630 | } | ||
631 | |||
632 | /* Write data to a scsi device. Returns nonzero on failure. | ||
633 | diff --git a/util/thread-pool.c b/util/thread-pool.c | ||
634 | index XXXXXXX..XXXXXXX 100644 | ||
635 | --- a/util/thread-pool.c | ||
636 | +++ b/util/thread-pool.c | ||
637 | @@ -XXX,XX +XXX,XX @@ restart: | ||
638 | */ | ||
639 | qemu_bh_schedule(pool->completion_bh); | ||
640 | |||
641 | + aio_context_release(pool->ctx); | ||
642 | elem->common.cb(elem->common.opaque, elem->ret); | ||
643 | + aio_context_acquire(pool->ctx); | ||
644 | qemu_aio_unref(elem); | ||
645 | goto restart; | ||
646 | } else { | ||
647 | @@ -XXX,XX +XXX,XX @@ static void thread_pool_co_cb(void *opaque, int ret) | ||
648 | ThreadPoolCo *co = opaque; | ||
649 | |||
650 | co->ret = ret; | ||
651 | - qemu_coroutine_enter(co->co); | ||
652 | + aio_co_wake(co->co); | ||
653 | } | ||
654 | |||
655 | int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func, | ||
656 | -- | ||
657 | 2.9.3 | ||
658 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | This patch prepares for the removal of unnecessary lockcnt inc/dec pairs. | ||
4 | Extract the dispatching loop for file descriptor handlers into a new | ||
5 | function aio_dispatch_handlers, and then inline aio_dispatch into | ||
6 | aio_poll. | ||
7 | |||
8 | aio_dispatch can now become void. | ||
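For orientation, this is the shape the POSIX dispatch path takes once the
hunks below are applied (a condensed excerpt of QEMU-internal code for
reading alongside the diff, not a standalone program):

    /* util/aio-posix.c after this patch: aio_dispatch() returns void and
     * unconditionally runs bottom halves, fd handlers and timers. */
    void aio_dispatch(AioContext *ctx)
    {
        aio_bh_poll(ctx);

        qemu_lockcnt_inc(&ctx->list_lock);
        aio_dispatch_handlers(ctx);
        qemu_lockcnt_dec(&ctx->list_lock);

        timerlistgroup_run_timers(&ctx->tlg);
    }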
9 | |||
10 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
11 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
12 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
13 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
14 | Message-id: 20170213135235.12274-17-pbonzini@redhat.com | ||
15 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
16 | --- | ||
17 | include/block/aio.h | 6 +----- | ||
18 | util/aio-posix.c | 44 ++++++++++++++------------------------------ | ||
19 | util/aio-win32.c | 13 ++++--------- | ||
20 | util/async.c | 2 +- | ||
21 | 4 files changed, 20 insertions(+), 45 deletions(-) | ||
22 | |||
23 | diff --git a/include/block/aio.h b/include/block/aio.h | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/include/block/aio.h | ||
26 | +++ b/include/block/aio.h | ||
27 | @@ -XXX,XX +XXX,XX @@ bool aio_pending(AioContext *ctx); | ||
28 | /* Dispatch any pending callbacks from the GSource attached to the AioContext. | ||
29 | * | ||
30 | * This is used internally in the implementation of the GSource. | ||
31 | - * | ||
32 | - * @dispatch_fds: true to process fds, false to skip them | ||
33 | - * (can be used as an optimization by callers that know there | ||
34 | - * are no fds ready) | ||
35 | */ | ||
36 | -bool aio_dispatch(AioContext *ctx, bool dispatch_fds); | ||
37 | +void aio_dispatch(AioContext *ctx); | ||
38 | |||
39 | /* Progress in completing AIO work to occur. This can issue new pending | ||
40 | * aio as a result of executing I/O completion or bh callbacks. | ||
41 | diff --git a/util/aio-posix.c b/util/aio-posix.c | ||
42 | index XXXXXXX..XXXXXXX 100644 | ||
43 | --- a/util/aio-posix.c | ||
44 | +++ b/util/aio-posix.c | ||
45 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) | ||
46 | AioHandler *node, *tmp; | ||
47 | bool progress = false; | ||
48 | |||
49 | - /* | ||
50 | - * We have to walk very carefully in case aio_set_fd_handler is | ||
51 | - * called while we're walking. | ||
52 | - */ | ||
53 | - qemu_lockcnt_inc(&ctx->list_lock); | ||
54 | - | ||
55 | QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) { | ||
56 | int revents; | ||
57 | |||
58 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) | ||
59 | } | ||
60 | } | ||
61 | |||
62 | - qemu_lockcnt_dec(&ctx->list_lock); | ||
63 | return progress; | ||
64 | } | ||
65 | |||
66 | -/* | ||
67 | - * Note that dispatch_fds == false has the side-effect of post-poning the | ||
68 | - * freeing of deleted handlers. | ||
69 | - */ | ||
70 | -bool aio_dispatch(AioContext *ctx, bool dispatch_fds) | ||
71 | +void aio_dispatch(AioContext *ctx) | ||
72 | { | ||
73 | - bool progress; | ||
74 | + aio_bh_poll(ctx); | ||
75 | |||
76 | - /* | ||
77 | - * If there are callbacks left that have been queued, we need to call them. | ||
78 | - * Do not call select in this case, because it is possible that the caller | ||
79 | - * does not need a complete flush (as is the case for aio_poll loops). | ||
80 | - */ | ||
81 | - progress = aio_bh_poll(ctx); | ||
82 | + qemu_lockcnt_inc(&ctx->list_lock); | ||
83 | + aio_dispatch_handlers(ctx); | ||
84 | + qemu_lockcnt_dec(&ctx->list_lock); | ||
85 | |||
86 | - if (dispatch_fds) { | ||
87 | - progress |= aio_dispatch_handlers(ctx); | ||
88 | - } | ||
89 | - | ||
90 | - /* Run our timers */ | ||
91 | - progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
92 | - | ||
93 | - return progress; | ||
94 | + timerlistgroup_run_timers(&ctx->tlg); | ||
95 | } | ||
96 | |||
97 | /* These thread-local variables are used only in a small part of aio_poll | ||
98 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
99 | npfd = 0; | ||
100 | qemu_lockcnt_dec(&ctx->list_lock); | ||
101 | |||
102 | - /* Run dispatch even if there were no readable fds to run timers */ | ||
103 | - if (aio_dispatch(ctx, ret > 0)) { | ||
104 | - progress = true; | ||
105 | + progress |= aio_bh_poll(ctx); | ||
106 | + | ||
107 | + if (ret > 0) { | ||
108 | + qemu_lockcnt_inc(&ctx->list_lock); | ||
109 | + progress |= aio_dispatch_handlers(ctx); | ||
110 | + qemu_lockcnt_dec(&ctx->list_lock); | ||
111 | } | ||
112 | |||
113 | + progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
114 | + | ||
115 | return progress; | ||
116 | } | ||
117 | |||
118 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
119 | index XXXXXXX..XXXXXXX 100644 | ||
120 | --- a/util/aio-win32.c | ||
121 | +++ b/util/aio-win32.c | ||
122 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
123 | return progress; | ||
124 | } | ||
125 | |||
126 | -bool aio_dispatch(AioContext *ctx, bool dispatch_fds) | ||
127 | +void aio_dispatch(AioContext *ctx) | ||
128 | { | ||
129 | - bool progress; | ||
130 | - | ||
131 | - progress = aio_bh_poll(ctx); | ||
132 | - if (dispatch_fds) { | ||
133 | - progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); | ||
134 | - } | ||
135 | - progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
136 | - return progress; | ||
137 | + aio_bh_poll(ctx); | ||
138 | + aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); | ||
139 | + timerlistgroup_run_timers(&ctx->tlg); | ||
140 | } | ||
141 | |||
142 | bool aio_poll(AioContext *ctx, bool blocking) | ||
143 | diff --git a/util/async.c b/util/async.c | ||
144 | index XXXXXXX..XXXXXXX 100644 | ||
145 | --- a/util/async.c | ||
146 | +++ b/util/async.c | ||
147 | @@ -XXX,XX +XXX,XX @@ aio_ctx_dispatch(GSource *source, | ||
148 | AioContext *ctx = (AioContext *) source; | ||
149 | |||
150 | assert(callback == NULL); | ||
151 | - aio_dispatch(ctx, true); | ||
152 | + aio_dispatch(ctx); | ||
153 | return true; | ||
154 | } | ||
155 | |||
156 | -- | ||
157 | 2.9.3 | ||
158 | |||
1 | From: Fabiano Rosas <farosas@suse.de> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | Having a name in the source helps with debugging core dumps when one | 3 | Pull the increment/decrement pair out of aio_bh_poll and into the |
4 | might not have access to TLS data to cross-reference AioContexts with | 4 | callers. |
5 | their addresses. | ||
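Condensed from the right-hand patch's util/aio-posix.c hunk below (again
QEMU-internal code, shown only to illustrate the new contract): after this
change aio_bh_poll() assumes its caller has already raised the
ctx->list_lock count, so the whole dispatch sequence is bracketed once:

    void aio_dispatch(AioContext *ctx)
    {
        qemu_lockcnt_inc(&ctx->list_lock);
        aio_bh_poll(ctx);           /* no longer touches the count itself */
        aio_dispatch_handlers(ctx);
        qemu_lockcnt_dec(&ctx->list_lock);

        timerlistgroup_run_timers(&ctx->tlg);
    }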
6 | 5 | ||
7 | Signed-off-by: Fabiano Rosas <farosas@suse.de> | 6 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> |
8 | Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org> | 7 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> |
9 | Message-id: 20230905180359.14083-1-farosas@suse.de | 8 | Reviewed-by: Fam Zheng <famz@redhat.com> |
9 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
10 | Message-id: 20170213135235.12274-18-pbonzini@redhat.com | ||
10 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 11 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
11 | --- | 12 | --- |
12 | iothread.c | 14 ++++++++------ | 13 | util/aio-posix.c | 8 +++----- |
13 | 1 file changed, 8 insertions(+), 6 deletions(-) | 14 | util/aio-win32.c | 8 ++++---- |
15 | util/async.c | 12 ++++++------ | ||
16 | 3 files changed, 13 insertions(+), 15 deletions(-) | ||
14 | 17 | ||
15 | diff --git a/iothread.c b/iothread.c | 18 | diff --git a/util/aio-posix.c b/util/aio-posix.c |
16 | index XXXXXXX..XXXXXXX 100644 | 19 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/iothread.c | 20 | --- a/util/aio-posix.c |
18 | +++ b/iothread.c | 21 | +++ b/util/aio-posix.c |
19 | @@ -XXX,XX +XXX,XX @@ static void iothread_instance_finalize(Object *obj) | 22 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx) |
20 | qemu_sem_destroy(&iothread->init_done_sem); | 23 | |
24 | void aio_dispatch(AioContext *ctx) | ||
25 | { | ||
26 | + qemu_lockcnt_inc(&ctx->list_lock); | ||
27 | aio_bh_poll(ctx); | ||
28 | - | ||
29 | - qemu_lockcnt_inc(&ctx->list_lock); | ||
30 | aio_dispatch_handlers(ctx); | ||
31 | qemu_lockcnt_dec(&ctx->list_lock); | ||
32 | |||
33 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
34 | } | ||
35 | |||
36 | npfd = 0; | ||
37 | - qemu_lockcnt_dec(&ctx->list_lock); | ||
38 | |||
39 | progress |= aio_bh_poll(ctx); | ||
40 | |||
41 | if (ret > 0) { | ||
42 | - qemu_lockcnt_inc(&ctx->list_lock); | ||
43 | progress |= aio_dispatch_handlers(ctx); | ||
44 | - qemu_lockcnt_dec(&ctx->list_lock); | ||
45 | } | ||
46 | |||
47 | + qemu_lockcnt_dec(&ctx->list_lock); | ||
48 | + | ||
49 | progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
50 | |||
51 | return progress; | ||
52 | diff --git a/util/aio-win32.c b/util/aio-win32.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/util/aio-win32.c | ||
55 | +++ b/util/aio-win32.c | ||
56 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
57 | bool progress = false; | ||
58 | AioHandler *tmp; | ||
59 | |||
60 | - qemu_lockcnt_inc(&ctx->list_lock); | ||
61 | - | ||
62 | /* | ||
63 | * We have to walk very carefully in case aio_set_fd_handler is | ||
64 | * called while we're walking. | ||
65 | @@ -XXX,XX +XXX,XX @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event) | ||
66 | } | ||
67 | } | ||
68 | |||
69 | - qemu_lockcnt_dec(&ctx->list_lock); | ||
70 | return progress; | ||
21 | } | 71 | } |
22 | 72 | ||
23 | -static void iothread_init_gcontext(IOThread *iothread) | 73 | void aio_dispatch(AioContext *ctx) |
24 | +static void iothread_init_gcontext(IOThread *iothread, const char *thread_name) | ||
25 | { | 74 | { |
26 | GSource *source; | 75 | + qemu_lockcnt_inc(&ctx->list_lock); |
27 | + g_autofree char *name = g_strdup_printf("%s aio-context", thread_name); | 76 | aio_bh_poll(ctx); |
28 | 77 | aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); | |
29 | iothread->worker_context = g_main_context_new(); | 78 | + qemu_lockcnt_dec(&ctx->list_lock); |
30 | source = aio_get_g_source(iothread_get_aio_context(iothread)); | 79 | timerlistgroup_run_timers(&ctx->tlg); |
31 | + g_source_set_name(source, name); | 80 | } |
32 | g_source_attach(source, iothread->worker_context); | 81 | |
33 | g_source_unref(source); | 82 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) |
34 | iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE); | 83 | } |
35 | @@ -XXX,XX +XXX,XX @@ static void iothread_init(EventLoopBase *base, Error **errp) | 84 | } |
85 | |||
86 | - qemu_lockcnt_dec(&ctx->list_lock); | ||
87 | first = true; | ||
88 | |||
89 | /* ctx->notifier is always registered. */ | ||
90 | @@ -XXX,XX +XXX,XX @@ bool aio_poll(AioContext *ctx, bool blocking) | ||
91 | progress |= aio_dispatch_handlers(ctx, event); | ||
92 | } while (count > 0); | ||
93 | |||
94 | + qemu_lockcnt_dec(&ctx->list_lock); | ||
95 | + | ||
96 | progress |= timerlistgroup_run_timers(&ctx->tlg); | ||
97 | return progress; | ||
98 | } | ||
99 | diff --git a/util/async.c b/util/async.c | ||
100 | index XXXXXXX..XXXXXXX 100644 | ||
101 | --- a/util/async.c | ||
102 | +++ b/util/async.c | ||
103 | @@ -XXX,XX +XXX,XX @@ void aio_bh_call(QEMUBH *bh) | ||
104 | bh->cb(bh->opaque); | ||
105 | } | ||
106 | |||
107 | -/* Multiple occurrences of aio_bh_poll cannot be called concurrently */ | ||
108 | +/* Multiple occurrences of aio_bh_poll cannot be called concurrently. | ||
109 | + * The count in ctx->list_lock is incremented before the call, and is | ||
110 | + * not affected by the call. | ||
111 | + */ | ||
112 | int aio_bh_poll(AioContext *ctx) | ||
36 | { | 113 | { |
37 | Error *local_error = NULL; | 114 | QEMUBH *bh, **bhp, *next; |
38 | IOThread *iothread = IOTHREAD(base); | 115 | int ret; |
39 | - char *thread_name; | 116 | bool deleted = false; |
40 | + g_autofree char *thread_name = NULL; | 117 | |
41 | 118 | - qemu_lockcnt_inc(&ctx->list_lock); | |
42 | iothread->stopping = false; | 119 | - |
43 | iothread->running = true; | 120 | ret = 0; |
44 | @@ -XXX,XX +XXX,XX @@ static void iothread_init(EventLoopBase *base, Error **errp) | 121 | for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) { |
45 | return; | 122 | next = atomic_rcu_read(&bh->next); |
123 | @@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx) | ||
124 | |||
125 | /* remove deleted bhs */ | ||
126 | if (!deleted) { | ||
127 | - qemu_lockcnt_dec(&ctx->list_lock); | ||
128 | return ret; | ||
46 | } | 129 | } |
47 | 130 | ||
48 | + thread_name = g_strdup_printf("IO %s", | 131 | - if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) { |
49 | + object_get_canonical_path_component(OBJECT(base))); | 132 | + if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) { |
50 | + | 133 | bhp = &ctx->first_bh; |
51 | /* | 134 | while (*bhp) { |
52 | * Init one GMainContext for the iothread unconditionally, even if | 135 | bh = *bhp; |
53 | * it's not used | 136 | @@ -XXX,XX +XXX,XX @@ int aio_bh_poll(AioContext *ctx) |
54 | */ | 137 | bhp = &bh->next; |
55 | - iothread_init_gcontext(iothread); | 138 | } |
56 | + iothread_init_gcontext(iothread, thread_name); | 139 | } |
57 | 140 | - qemu_lockcnt_unlock(&ctx->list_lock); | |
58 | iothread_set_aio_context_params(base, &local_error); | 141 | + qemu_lockcnt_inc_and_unlock(&ctx->list_lock); |
59 | if (local_error) { | 142 | } |
60 | @@ -XXX,XX +XXX,XX @@ static void iothread_init(EventLoopBase *base, Error **errp) | 143 | return ret; |
61 | /* This assumes we are called from a thread with useful CPU affinity for us | 144 | } |
62 | * to inherit. | ||
63 | */ | ||
64 | - thread_name = g_strdup_printf("IO %s", | ||
65 | - object_get_canonical_path_component(OBJECT(base))); | ||
66 | qemu_thread_create(&iothread->thread, thread_name, iothread_run, | ||
67 | iothread, QEMU_THREAD_JOINABLE); | ||
68 | - g_free(thread_name); | ||
69 | |||
70 | /* Wait for initialization to complete */ | ||
71 | while (iothread->thread_id == -1) { | ||
72 | -- | 145 | -- |
73 | 2.41.0 | 146 | 2.9.3 |
74 | 147 | ||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
4 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
5 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
6 | Reviewed-by: Daniel P. Berrange <berrange@redhat.com> | ||
7 | Message-id: 20170213135235.12274-19-pbonzini@redhat.com | ||
8 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
9 | --- | ||
10 | include/block/block_int.h | 64 +++++++++++++++++++++++++----------------- | ||
11 | include/sysemu/block-backend.h | 14 ++++++--- | ||
12 | 2 files changed, 49 insertions(+), 29 deletions(-) | ||
13 | |||
14 | diff --git a/include/block/block_int.h b/include/block/block_int.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/include/block/block_int.h | ||
17 | +++ b/include/block/block_int.h | ||
18 | @@ -XXX,XX +XXX,XX @@ struct BdrvChild { | ||
19 | * copied as well. | ||
20 | */ | ||
21 | struct BlockDriverState { | ||
22 | - int64_t total_sectors; /* if we are reading a disk image, give its | ||
23 | - size in sectors */ | ||
24 | + /* Protected by big QEMU lock or read-only after opening. No special | ||
25 | + * locking needed during I/O... | ||
26 | + */ | ||
27 | int open_flags; /* flags used to open the file, re-used for re-open */ | ||
28 | bool read_only; /* if true, the media is read only */ | ||
29 | bool encrypted; /* if true, the media is encrypted */ | ||
30 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
31 | bool sg; /* if true, the device is a /dev/sg* */ | ||
32 | bool probed; /* if true, format was probed rather than specified */ | ||
33 | |||
34 | - int copy_on_read; /* if nonzero, copy read backing sectors into image. | ||
35 | - note this is a reference count */ | ||
36 | - | ||
37 | - CoQueue flush_queue; /* Serializing flush queue */ | ||
38 | - bool active_flush_req; /* Flush request in flight? */ | ||
39 | - unsigned int write_gen; /* Current data generation */ | ||
40 | - unsigned int flushed_gen; /* Flushed write generation */ | ||
41 | - | ||
42 | BlockDriver *drv; /* NULL means no media */ | ||
43 | void *opaque; | ||
44 | |||
45 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
46 | BdrvChild *backing; | ||
47 | BdrvChild *file; | ||
48 | |||
49 | - /* Callback before write request is processed */ | ||
50 | - NotifierWithReturnList before_write_notifiers; | ||
51 | - | ||
52 | - /* number of in-flight requests; overall and serialising */ | ||
53 | - unsigned int in_flight; | ||
54 | - unsigned int serialising_in_flight; | ||
55 | - | ||
56 | - bool wakeup; | ||
57 | - | ||
58 | - /* Offset after the highest byte written to */ | ||
59 | - uint64_t wr_highest_offset; | ||
60 | - | ||
61 | /* I/O Limits */ | ||
62 | BlockLimits bl; | ||
63 | |||
64 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
65 | QTAILQ_ENTRY(BlockDriverState) bs_list; | ||
66 | /* element of the list of monitor-owned BDS */ | ||
67 | QTAILQ_ENTRY(BlockDriverState) monitor_list; | ||
68 | - QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps; | ||
69 | int refcnt; | ||
70 | |||
71 | - QLIST_HEAD(, BdrvTrackedRequest) tracked_requests; | ||
72 | - | ||
73 | /* operation blockers */ | ||
74 | QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX]; | ||
75 | |||
76 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
77 | /* The error object in use for blocking operations on backing_hd */ | ||
78 | Error *backing_blocker; | ||
79 | |||
80 | + /* Protected by AioContext lock */ | ||
81 | + | ||
82 | + /* If true, copy read backing sectors into image. Can be >1 if more | ||
83 | + * than one client has requested copy-on-read. | ||
84 | + */ | ||
85 | + int copy_on_read; | ||
86 | + | ||
87 | + /* If we are reading a disk image, give its size in sectors. | ||
88 | + * Generally read-only; it is written to by load_vmstate and save_vmstate, | ||
89 | + * but the block layer is quiescent during those. | ||
90 | + */ | ||
91 | + int64_t total_sectors; | ||
92 | + | ||
93 | + /* Callback before write request is processed */ | ||
94 | + NotifierWithReturnList before_write_notifiers; | ||
95 | + | ||
96 | + /* number of in-flight requests; overall and serialising */ | ||
97 | + unsigned int in_flight; | ||
98 | + unsigned int serialising_in_flight; | ||
99 | + | ||
100 | + bool wakeup; | ||
101 | + | ||
102 | + /* Offset after the highest byte written to */ | ||
103 | + uint64_t wr_highest_offset; | ||
104 | + | ||
105 | /* threshold limit for writes, in bytes. "High water mark". */ | ||
106 | uint64_t write_threshold_offset; | ||
107 | NotifierWithReturn write_threshold_notifier; | ||
108 | @@ -XXX,XX +XXX,XX @@ struct BlockDriverState { | ||
109 | /* counter for nested bdrv_io_plug */ | ||
110 | unsigned io_plugged; | ||
111 | |||
112 | + QLIST_HEAD(, BdrvTrackedRequest) tracked_requests; | ||
113 | + CoQueue flush_queue; /* Serializing flush queue */ | ||
114 | + bool active_flush_req; /* Flush request in flight? */ | ||
115 | + unsigned int write_gen; /* Current data generation */ | ||
116 | + unsigned int flushed_gen; /* Flushed write generation */ | ||
117 | + | ||
118 | + QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps; | ||
119 | + | ||
120 | + /* do we need to tell the guest if we have a volatile write cache? */ | ||
121 | + int enable_write_cache; | ||
122 | + | ||
123 | int quiesce_counter; | ||
124 | }; | ||
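As a hedged illustration of the grouping above (a sketch, not part of the patch; the helper name is invented), code running outside the BDS's home AioContext is expected to bracket access to the AioContext-protected fields with the existing acquire/release API:

    /* Sketch only; bdrv_get_aio_context() and aio_context_acquire()/
     * aio_context_release() are existing QEMU APIs, inspect_bs() is a
     * hypothetical caller. */
    static void inspect_bs(BlockDriverState *bs)
    {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        /* bs->in_flight, bs->dirty_bitmaps, etc. may be read here */
        aio_context_release(ctx);
    }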
125 | |||
126 | diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h | ||
127 | index XXXXXXX..XXXXXXX 100644 | ||
128 | --- a/include/sysemu/block-backend.h | ||
129 | +++ b/include/sysemu/block-backend.h | ||
130 | @@ -XXX,XX +XXX,XX @@ typedef struct BlockDevOps { | ||
131 | * fields that must be public. This is in particular for QLIST_ENTRY() and | ||
132 | * friends so that BlockBackends can be kept in lists outside block-backend.c */ | ||
133 | typedef struct BlockBackendPublic { | ||
134 | - /* I/O throttling. | ||
135 | - * throttle_state tells us if this BlockBackend has I/O limits configured. | ||
136 | - * io_limits_disabled tells us if they are currently being enforced */ | ||
137 | + /* I/O throttling has its own locking, but also some fields are | ||
138 | + * protected by the AioContext lock. | ||
139 | + */ | ||
140 | + | ||
141 | + /* Protected by AioContext lock. */ | ||
142 | CoQueue throttled_reqs[2]; | ||
143 | + | ||
144 | + /* Nonzero if the I/O limits are currently being ignored; generally | ||
145 | + * it is zero. */ | ||
146 | unsigned int io_limits_disabled; | ||
147 | |||
148 | /* The following fields are protected by the ThrottleGroup lock. | ||
149 | - * See the ThrottleGroup documentation for details. */ | ||
150 | + * See the ThrottleGroup documentation for details. | ||
151 | + * throttle_state tells us if I/O limits are configured. */ | ||
152 | ThrottleState *throttle_state; | ||
153 | ThrottleTimers throttle_timers; | ||
154 | unsigned pending_reqs[2]; | ||
155 | -- | ||
156 | 2.9.3 | ||
157 | |||
New patch | |||
---|---|---|---|
1 | 1 | From: Paolo Bonzini <pbonzini@redhat.com> | |
2 | |||
3 | This uses the lock-free mutex described in the paper '"Blocking without | ||
4 | Locking", or LFTHREADS: A lock-free thread library' by Gidenstam and | ||
5 | Papatriantafilou. The same technique is used in OSv, and in fact | ||
6 | the code is essentially a conversion to C of OSv's code. | ||
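
As a quick orientation, a minimal usage sketch (not from the patch; the entry point and counter are invented for illustration). The point of the change is that this pattern becomes safe even when the contending coroutines run in different AioContexts:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Hypothetical example; only the qemu_co_mutex_*() calls are real APIs.
     * qemu_co_mutex_init(&lock) must run once before the coroutines start. */
    static CoMutex lock;
    static unsigned long shared_counter;

    static void coroutine_fn bump_counter(void *opaque)
    {
        qemu_co_mutex_lock(&lock);    /* may yield if contended */
        shared_counter++;             /* now safe across AioContexts */
        qemu_co_mutex_unlock(&lock);
    }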
7 | |||
8 | [Added missing coroutine_fn in tests/test-aio-multithread.c. | ||
9 | --Stefan] | ||
10 | |||
11 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
12 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
13 | Message-id: 20170213181244.16297-2-pbonzini@redhat.com | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | --- | ||
16 | include/qemu/coroutine.h | 17 ++++- | ||
17 | tests/test-aio-multithread.c | 86 ++++++++++++++++++++++++ | ||
18 | util/qemu-coroutine-lock.c | 155 ++++++++++++++++++++++++++++++++++++++++--- | ||
19 | util/trace-events | 1 + | ||
20 | 4 files changed, 246 insertions(+), 13 deletions(-) | ||
21 | |||
22 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h | ||
23 | index XXXXXXX..XXXXXXX 100644 | ||
24 | --- a/include/qemu/coroutine.h | ||
25 | +++ b/include/qemu/coroutine.h | ||
26 | @@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue); | ||
27 | /** | ||
28 | * Provides a mutex that can be used to synchronise coroutines | ||
29 | */ | ||
30 | +struct CoWaitRecord; | ||
31 | typedef struct CoMutex { | ||
32 | - bool locked; | ||
33 | + /* Count of pending lockers; 0 for a free mutex, 1 for an | ||
34 | + * uncontended mutex. | ||
35 | + */ | ||
36 | + unsigned locked; | ||
37 | + | ||
38 | + /* A queue of waiters. Elements are added atomically in front of | ||
39 | + * from_push. to_pop is only populated, and popped from, by whoever | ||
40 | + * is in charge of the next wakeup. This can be an unlocker or, | ||
41 | + * through the handoff protocol, a locker that is about to go to sleep. | ||
42 | + */ | ||
43 | + QSLIST_HEAD(, CoWaitRecord) from_push, to_pop; | ||
44 | + | ||
45 | + unsigned handoff, sequence; | ||
46 | + | ||
47 | Coroutine *holder; | ||
48 | - CoQueue queue; | ||
49 | } CoMutex; | ||
50 | |||
51 | /** | ||
52 | diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/tests/test-aio-multithread.c | ||
55 | +++ b/tests/test-aio-multithread.c | ||
56 | @@ -XXX,XX +XXX,XX @@ static void test_multi_co_schedule_10(void) | ||
57 | test_multi_co_schedule(10); | ||
58 | } | ||
59 | |||
60 | +/* CoMutex thread-safety. */ | ||
61 | + | ||
62 | +static uint32_t atomic_counter; | ||
63 | +static uint32_t running; | ||
64 | +static uint32_t counter; | ||
65 | +static CoMutex comutex; | ||
66 | + | ||
67 | +static void coroutine_fn test_multi_co_mutex_entry(void *opaque) | ||
68 | +{ | ||
69 | + while (!atomic_mb_read(&now_stopping)) { | ||
70 | + qemu_co_mutex_lock(&comutex); | ||
71 | + counter++; | ||
72 | + qemu_co_mutex_unlock(&comutex); | ||
73 | + | ||
74 | + /* Increase atomic_counter *after* releasing the mutex. Otherwise | ||
75 | + * there is a chance (it happens about 1 in 3 runs) that the iothread | ||
76 | + * exits before the coroutine is woken up, causing a spurious | ||
77 | + * assertion failure. | ||
78 | + */ | ||
79 | + atomic_inc(&atomic_counter); | ||
80 | + } | ||
81 | + atomic_dec(&running); | ||
82 | +} | ||
83 | + | ||
84 | +static void test_multi_co_mutex(int threads, int seconds) | ||
85 | +{ | ||
86 | + int i; | ||
87 | + | ||
88 | + qemu_co_mutex_init(&comutex); | ||
89 | + counter = 0; | ||
90 | + atomic_counter = 0; | ||
91 | + now_stopping = false; | ||
92 | + | ||
93 | + create_aio_contexts(); | ||
94 | + assert(threads <= NUM_CONTEXTS); | ||
95 | + running = threads; | ||
96 | + for (i = 0; i < threads; i++) { | ||
97 | + Coroutine *co1 = qemu_coroutine_create(test_multi_co_mutex_entry, NULL); | ||
98 | + aio_co_schedule(ctx[i], co1); | ||
99 | + } | ||
100 | + | ||
101 | + g_usleep(seconds * 1000000); | ||
102 | + | ||
103 | + atomic_mb_set(&now_stopping, true); | ||
104 | + while (running > 0) { | ||
105 | + g_usleep(100000); | ||
106 | + } | ||
107 | + | ||
108 | + join_aio_contexts(); | ||
109 | + g_test_message("%d iterations/second\n", counter / seconds); | ||
110 | + g_assert_cmpint(counter, ==, atomic_counter); | ||
111 | +} | ||
112 | + | ||
113 | +/* Testing with NUM_CONTEXTS threads focuses on the queue. The mutex however | ||
114 | + * is too contended (and the threads spend too much time in aio_poll) | ||
115 | + * to actually stress the handoff protocol. | ||
116 | + */ | ||
117 | +static void test_multi_co_mutex_1(void) | ||
118 | +{ | ||
119 | + test_multi_co_mutex(NUM_CONTEXTS, 1); | ||
120 | +} | ||
121 | + | ||
122 | +static void test_multi_co_mutex_10(void) | ||
123 | +{ | ||
124 | + test_multi_co_mutex(NUM_CONTEXTS, 10); | ||
125 | +} | ||
126 | + | ||
127 | +/* Testing with fewer threads stresses the handoff protocol too. Still, the | ||
128 | + * case where the locker _can_ pick up a handoff is very rare, happening | ||
129 | + * about 10 times in 1 million, so increase the runtime a bit compared to | ||
130 | + * other "quick" testcases that only run for 1 second. | ||
131 | + */ | ||
132 | +static void test_multi_co_mutex_2_3(void) | ||
133 | +{ | ||
134 | + test_multi_co_mutex(2, 3); | ||
135 | +} | ||
136 | + | ||
137 | +static void test_multi_co_mutex_2_30(void) | ||
138 | +{ | ||
139 | + test_multi_co_mutex(2, 30); | ||
140 | +} | ||
141 | + | ||
142 | /* End of tests. */ | ||
143 | |||
144 | int main(int argc, char **argv) | ||
145 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
146 | g_test_add_func("/aio/multi/lifecycle", test_lifecycle); | ||
147 | if (g_test_quick()) { | ||
148 | g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1); | ||
149 | + g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1); | ||
150 | + g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3); | ||
151 | } else { | ||
152 | g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10); | ||
153 | + g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10); | ||
154 | + g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30); | ||
155 | } | ||
156 | return g_test_run(); | ||
157 | } | ||
158 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c | ||
159 | index XXXXXXX..XXXXXXX 100644 | ||
160 | --- a/util/qemu-coroutine-lock.c | ||
161 | +++ b/util/qemu-coroutine-lock.c | ||
162 | @@ -XXX,XX +XXX,XX @@ | ||
163 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
164 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
165 | * THE SOFTWARE. | ||
166 | + * | ||
167 | + * The lock-free mutex implementation is based on OSv | ||
168 | + * (core/lfmutex.cc, include/lockfree/mutex.hh). | ||
169 | + * Copyright (C) 2013 Cloudius Systems, Ltd. | ||
170 | */ | ||
171 | |||
172 | #include "qemu/osdep.h" | ||
173 | @@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue) | ||
174 | return QSIMPLEQ_FIRST(&queue->entries) == NULL; | ||
175 | } | ||
176 | |||
177 | +/* The wait records are handled with a multiple-producer, single-consumer | ||
178 | + * lock-free queue. There cannot be two concurrent pop_waiter() calls | ||
179 | + * because pop_waiter() can only be called while mutex->handoff is zero. | ||
180 | + * This can happen in three cases: | ||
181 | + * - in qemu_co_mutex_unlock, before the hand-off protocol has started. | ||
182 | + * In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and | ||
183 | + * not take part in the handoff. | ||
184 | + * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from | ||
185 | + * qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail | ||
186 | + * the cmpxchg (it will see either 0 or the next sequence value) and | ||
187 | + * exit. The next hand-off cannot begin until qemu_co_mutex_lock has | ||
188 | + * woken up someone. | ||
189 | + * - in qemu_co_mutex_unlock, if it takes the hand-off token itself. | ||
190 | + * In this case another iteration starts with mutex->handoff == 0; | ||
191 | + * a concurrent qemu_co_mutex_lock will fail the cmpxchg, and | ||
192 | + * qemu_co_mutex_unlock will go back to case (1). | ||
193 | + * | ||
194 | + * The following functions manage this queue. | ||
195 | + */ | ||
196 | +typedef struct CoWaitRecord { | ||
197 | + Coroutine *co; | ||
198 | + QSLIST_ENTRY(CoWaitRecord) next; | ||
199 | +} CoWaitRecord; | ||
200 | + | ||
201 | +static void push_waiter(CoMutex *mutex, CoWaitRecord *w) | ||
202 | +{ | ||
203 | + w->co = qemu_coroutine_self(); | ||
204 | + QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next); | ||
205 | +} | ||
206 | + | ||
207 | +static void move_waiters(CoMutex *mutex) | ||
208 | +{ | ||
209 | + QSLIST_HEAD(, CoWaitRecord) reversed; | ||
210 | + QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push); | ||
211 | + while (!QSLIST_EMPTY(&reversed)) { | ||
212 | + CoWaitRecord *w = QSLIST_FIRST(&reversed); | ||
213 | + QSLIST_REMOVE_HEAD(&reversed, next); | ||
214 | + QSLIST_INSERT_HEAD(&mutex->to_pop, w, next); | ||
215 | + } | ||
216 | +} | ||
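    /* Illustration, not part of the patch: a worked example of the
     * two-list trick. If waiters A, B and C push in that order,
     * from_push holds C->B->A, because pushes go to the head.
     * move_waiters() pops that list head-first and re-inserts each
     * element at the head of to_pop, restoring A->B->C, so pop_waiter()
     * below wakes waiters in FIFO order. */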
217 | + | ||
218 | +static CoWaitRecord *pop_waiter(CoMutex *mutex) | ||
219 | +{ | ||
220 | + CoWaitRecord *w; | ||
221 | + | ||
222 | + if (QSLIST_EMPTY(&mutex->to_pop)) { | ||
223 | + move_waiters(mutex); | ||
224 | + if (QSLIST_EMPTY(&mutex->to_pop)) { | ||
225 | + return NULL; | ||
226 | + } | ||
227 | + } | ||
228 | + w = QSLIST_FIRST(&mutex->to_pop); | ||
229 | + QSLIST_REMOVE_HEAD(&mutex->to_pop, next); | ||
230 | + return w; | ||
231 | +} | ||
232 | + | ||
233 | +static bool has_waiters(CoMutex *mutex) | ||
234 | +{ | ||
235 | + return QSLIST_EMPTY(&mutex->to_pop) || QSLIST_EMPTY(&mutex->from_push); | ||
236 | +} | ||
237 | + | ||
238 | void qemu_co_mutex_init(CoMutex *mutex) | ||
239 | { | ||
240 | memset(mutex, 0, sizeof(*mutex)); | ||
241 | - qemu_co_queue_init(&mutex->queue); | ||
242 | } | ||
243 | |||
244 | -void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex) | ||
245 | +static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex) | ||
246 | { | ||
247 | Coroutine *self = qemu_coroutine_self(); | ||
248 | + CoWaitRecord w; | ||
249 | + unsigned old_handoff; | ||
250 | |||
251 | trace_qemu_co_mutex_lock_entry(mutex, self); | ||
252 | + w.co = self; | ||
253 | + push_waiter(mutex, &w); | ||
254 | |||
255 | - while (mutex->locked) { | ||
256 | - qemu_co_queue_wait(&mutex->queue); | ||
257 | + /* This is the "Responsibility Hand-Off" protocol; a lock() picks from | ||
258 | + * a concurrent unlock() the responsibility of waking somebody up. | ||
259 | + */ | ||
260 | + old_handoff = atomic_mb_read(&mutex->handoff); | ||
261 | + if (old_handoff && | ||
262 | + has_waiters(mutex) && | ||
263 | + atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) { | ||
264 | + /* There can be no concurrent pops, because there can be only | ||
265 | + * one active handoff at a time. | ||
266 | + */ | ||
267 | + CoWaitRecord *to_wake = pop_waiter(mutex); | ||
268 | + Coroutine *co = to_wake->co; | ||
269 | + if (co == self) { | ||
270 | + /* We got the lock ourselves! */ | ||
271 | + assert(to_wake == &w); | ||
272 | + return; | ||
273 | + } | ||
274 | + | ||
275 | + aio_co_wake(co); | ||
276 | } | ||
277 | |||
278 | - mutex->locked = true; | ||
279 | - mutex->holder = self; | ||
280 | - self->locks_held++; | ||
281 | - | ||
282 | + qemu_coroutine_yield(); | ||
283 | trace_qemu_co_mutex_lock_return(mutex, self); | ||
284 | } | ||
285 | |||
286 | +void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex) | ||
287 | +{ | ||
288 | + Coroutine *self = qemu_coroutine_self(); | ||
289 | + | ||
290 | + if (atomic_fetch_inc(&mutex->locked) == 0) { | ||
291 | + /* Uncontended. */ | ||
292 | + trace_qemu_co_mutex_lock_uncontended(mutex, self); | ||
293 | + } else { | ||
294 | + qemu_co_mutex_lock_slowpath(mutex); | ||
295 | + } | ||
296 | + mutex->holder = self; | ||
297 | + self->locks_held++; | ||
298 | +} | ||
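    /* Illustration, not part of the patch: the fast path works because
     * "locked" counts lockers instead of being a boolean. A
     * fetch-and-increment returning 0 means the mutex was free and we
     * now own it; any other value means a holder exists, and the
     * increment simultaneously tells the eventual unlocker (whose
     * fetch-and-decrement will return > 1) that it must wake someone. */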
299 | + | ||
300 | void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) | ||
301 | { | ||
302 | Coroutine *self = qemu_coroutine_self(); | ||
303 | |||
304 | trace_qemu_co_mutex_unlock_entry(mutex, self); | ||
305 | |||
306 | - assert(mutex->locked == true); | ||
307 | + assert(mutex->locked); | ||
308 | assert(mutex->holder == self); | ||
309 | assert(qemu_in_coroutine()); | ||
310 | |||
311 | - mutex->locked = false; | ||
312 | mutex->holder = NULL; | ||
313 | self->locks_held--; | ||
314 | - qemu_co_queue_next(&mutex->queue); | ||
315 | + if (atomic_fetch_dec(&mutex->locked) == 1) { | ||
316 | + /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */ | ||
317 | + return; | ||
318 | + } | ||
319 | + | ||
320 | + for (;;) { | ||
321 | + CoWaitRecord *to_wake = pop_waiter(mutex); | ||
322 | + unsigned our_handoff; | ||
323 | + | ||
324 | + if (to_wake) { | ||
325 | + Coroutine *co = to_wake->co; | ||
326 | + aio_co_wake(co); | ||
327 | + break; | ||
328 | + } | ||
329 | + | ||
330 | + /* Some concurrent lock() is in progress (we know this because | ||
331 | + * mutex->locked was >1) but it hasn't yet put itself on the wait | ||
332 | + * queue. Pick a sequence number for the handoff protocol (not 0). | ||
333 | + */ | ||
334 | + if (++mutex->sequence == 0) { | ||
335 | + mutex->sequence = 1; | ||
336 | + } | ||
337 | + | ||
338 | + our_handoff = mutex->sequence; | ||
339 | + atomic_mb_set(&mutex->handoff, our_handoff); | ||
340 | + if (!has_waiters(mutex)) { | ||
341 | + /* The concurrent lock has not added itself yet, so it | ||
342 | + * will be able to pick our handoff. | ||
343 | + */ | ||
344 | + break; | ||
345 | + } | ||
346 | + | ||
347 | + /* Try to do the handoff protocol ourselves; if somebody else has | ||
348 | + * already taken it, however, we're done and they're responsible. | ||
349 | + */ | ||
350 | + if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) { | ||
351 | + break; | ||
352 | + } | ||
353 | + } | ||
354 | |||
355 | trace_qemu_co_mutex_unlock_return(mutex, self); | ||
356 | } | ||
357 | diff --git a/util/trace-events b/util/trace-events | ||
358 | index XXXXXXX..XXXXXXX 100644 | ||
359 | --- a/util/trace-events | ||
360 | +++ b/util/trace-events | ||
361 | @@ -XXX,XX +XXX,XX @@ qemu_coroutine_terminate(void *co) "self %p" | ||
362 | |||
363 | # util/qemu-coroutine-lock.c | ||
364 | qemu_co_queue_run_restart(void *co) "co %p" | ||
365 | +qemu_co_mutex_lock_uncontended(void *mutex, void *self) "mutex %p self %p" | ||
366 | qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p" | ||
367 | qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p" | ||
368 | qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p" | ||
369 | -- | ||
370 | 2.9.3 | ||
371 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | Running a very small critical section on pthread_mutex_t and CoMutex | ||
4 | shows that pthread_mutex_t is much faster because it doesn't actually | ||
5 | go to sleep. What happens is that the critical section is shorter | ||
6 | than the latency of entering the kernel and thus FUTEX_WAIT always | ||
7 | fails. With CoMutex there is no such latency but you still want to | ||
8 | avoid wait and wakeup. So introduce it artificially. | ||
9 | |||
10 | This only works with one waiter; because CoMutex is fair, it will | ||
11 | always have more waits and wakeups than a pthread_mutex_t. | ||
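
To make the idea concrete, a standalone sketch of bounded spinning in portable C11 atomics (illustrative only; the patch below folds the spin directly into qemu_co_mutex_lock() and calls cpu_relax() between iterations):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Spin a bounded number of times hoping the holder releases the
     * lock soon; if not, report failure so the caller can fall back
     * to the blocking slow path. */
    static bool try_spin_acquire(atomic_int *locked, int max_spins)
    {
        for (int i = 0; i < max_spins; i++) {
            int expected = 0;
            if (atomic_compare_exchange_weak(locked, &expected, 1)) {
                return true;      /* acquired without sleeping */
            }
        }
        return false;             /* still contended: block instead */
    }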
12 | |||
13 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
14 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
15 | Message-id: 20170213181244.16297-3-pbonzini@redhat.com | ||
16 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
17 | --- | ||
18 | include/qemu/coroutine.h | 5 +++++ | ||
19 | util/qemu-coroutine-lock.c | 51 ++++++++++++++++++++++++++++++++++++++++------ | ||
20 | util/qemu-coroutine.c | 2 +- | ||
21 | 3 files changed, 51 insertions(+), 7 deletions(-) | ||
22 | |||
23 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h | ||
24 | index XXXXXXX..XXXXXXX 100644 | ||
25 | --- a/include/qemu/coroutine.h | ||
26 | +++ b/include/qemu/coroutine.h | ||
27 | @@ -XXX,XX +XXX,XX @@ typedef struct CoMutex { | ||
28 | */ | ||
29 | unsigned locked; | ||
30 | |||
31 | + /* Context that is holding the lock. Useful to avoid spinning | ||
32 | + * when two coroutines on the same AioContext try to get the lock. :) | ||
33 | + */ | ||
34 | + AioContext *ctx; | ||
35 | + | ||
36 | /* A queue of waiters. Elements are added atomically in front of | ||
37 | * from_push. to_pop is only populated, and popped from, by whoever | ||
38 | * is in charge of the next wakeup. This can be an unlocker or, | ||
39 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c | ||
40 | index XXXXXXX..XXXXXXX 100644 | ||
41 | --- a/util/qemu-coroutine-lock.c | ||
42 | +++ b/util/qemu-coroutine-lock.c | ||
43 | @@ -XXX,XX +XXX,XX @@ | ||
44 | #include "qemu-common.h" | ||
45 | #include "qemu/coroutine.h" | ||
46 | #include "qemu/coroutine_int.h" | ||
47 | +#include "qemu/processor.h" | ||
48 | #include "qemu/queue.h" | ||
49 | #include "block/aio.h" | ||
50 | #include "trace.h" | ||
51 | @@ -XXX,XX +XXX,XX @@ void qemu_co_mutex_init(CoMutex *mutex) | ||
52 | memset(mutex, 0, sizeof(*mutex)); | ||
53 | } | ||
54 | |||
55 | -static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex) | ||
56 | +static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co) | ||
57 | +{ | ||
58 | + /* Read co before co->ctx; pairs with smp_wmb() in | ||
59 | + * qemu_coroutine_enter(). | ||
60 | + */ | ||
61 | + smp_read_barrier_depends(); | ||
62 | + mutex->ctx = co->ctx; | ||
63 | + aio_co_wake(co); | ||
64 | +} | ||
65 | + | ||
66 | +static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx, | ||
67 | + CoMutex *mutex) | ||
68 | { | ||
69 | Coroutine *self = qemu_coroutine_self(); | ||
70 | CoWaitRecord w; | ||
71 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex) | ||
72 | if (co == self) { | ||
73 | /* We got the lock ourselves! */ | ||
74 | assert(to_wake == &w); | ||
75 | + mutex->ctx = ctx; | ||
76 | return; | ||
77 | } | ||
78 | |||
79 | - aio_co_wake(co); | ||
80 | + qemu_co_mutex_wake(mutex, co); | ||
81 | } | ||
82 | |||
83 | qemu_coroutine_yield(); | ||
84 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex) | ||
85 | |||
86 | void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex) | ||
87 | { | ||
88 | + AioContext *ctx = qemu_get_current_aio_context(); | ||
89 | Coroutine *self = qemu_coroutine_self(); | ||
90 | + int waiters, i; | ||
91 | |||
92 | - if (atomic_fetch_inc(&mutex->locked) == 0) { | ||
93 | + /* Running a very small critical section on pthread_mutex_t and CoMutex | ||
94 | + * shows that pthread_mutex_t is much faster because it doesn't actually | ||
95 | + * go to sleep. What happens is that the critical section is shorter | ||
96 | + * than the latency of entering the kernel and thus FUTEX_WAIT always | ||
97 | + * fails. With CoMutex there is no such latency but you still want to | ||
98 | + * avoid wait and wakeup. So introduce it artificially. | ||
99 | + */ | ||
100 | + i = 0; | ||
101 | +retry_fast_path: | ||
102 | + waiters = atomic_cmpxchg(&mutex->locked, 0, 1); | ||
103 | + if (waiters != 0) { | ||
104 | + while (waiters == 1 && ++i < 1000) { | ||
105 | + if (atomic_read(&mutex->ctx) == ctx) { | ||
106 | + break; | ||
107 | + } | ||
108 | + if (atomic_read(&mutex->locked) == 0) { | ||
109 | + goto retry_fast_path; | ||
110 | + } | ||
111 | + cpu_relax(); | ||
112 | + } | ||
113 | + waiters = atomic_fetch_inc(&mutex->locked); | ||
114 | + } | ||
115 | + | ||
116 | + if (waiters == 0) { | ||
117 | /* Uncontended. */ | ||
118 | trace_qemu_co_mutex_lock_uncontended(mutex, self); | ||
119 | + mutex->ctx = ctx; | ||
120 | } else { | ||
121 | - qemu_co_mutex_lock_slowpath(mutex); | ||
122 | + qemu_co_mutex_lock_slowpath(ctx, mutex); | ||
123 | } | ||
124 | mutex->holder = self; | ||
125 | self->locks_held++; | ||
126 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) | ||
127 | assert(mutex->holder == self); | ||
128 | assert(qemu_in_coroutine()); | ||
129 | |||
130 | + mutex->ctx = NULL; | ||
131 | mutex->holder = NULL; | ||
132 | self->locks_held--; | ||
133 | if (atomic_fetch_dec(&mutex->locked) == 1) { | ||
134 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex) | ||
135 | unsigned our_handoff; | ||
136 | |||
137 | if (to_wake) { | ||
138 | - Coroutine *co = to_wake->co; | ||
139 | - aio_co_wake(co); | ||
140 | + qemu_co_mutex_wake(mutex, to_wake->co); | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c | ||
145 | index XXXXXXX..XXXXXXX 100644 | ||
146 | --- a/util/qemu-coroutine.c | ||
147 | +++ b/util/qemu-coroutine.c | ||
148 | @@ -XXX,XX +XXX,XX @@ void qemu_coroutine_enter(Coroutine *co) | ||
149 | co->ctx = qemu_get_current_aio_context(); | ||
150 | |||
151 | /* Store co->ctx before anything that stores co. Matches | ||
152 | - * barrier in aio_co_wake. | ||
153 | + * barrier in aio_co_wake and qemu_co_mutex_wake. | ||
154 | */ | ||
155 | smp_wmb(); | ||
156 | |||
157 | -- | ||
158 | 2.9.3 | ||
159 | |||
New patch | |||
---|---|---|---|
1 | 1 | From: Paolo Bonzini <pbonzini@redhat.com> | |
2 | |||
3 | Add two implementations of the same benchmark as the previous patch, | ||
4 | but using pthreads. One uses a normal QemuMutex, the other is | ||
5 | Linux-only and implements a fair mutex based on MCS locks and futexes. | ||
6 | This shows that the slower performance of the 5-thread case is due to | ||
7 | the fairness of CoMutex, rather than to coroutines. If fairness does | ||
8 | not matter, as is the case with two threads, CoMutex can actually be | ||
9 | faster than pthreads. | ||
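
For readers who have not met MCS locks before, a minimal spinning version in portable C11 atomics may help (illustrative only; the patch's version below uses QEMU's atomic helpers and parks waiters on a futex instead of spinning). Each thread waits on its own node, so handing the lock over touches exactly one waiter's cache line:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    typedef struct McsNode {
        _Atomic(struct McsNode *) next;
        atomic_bool locked;
    } McsNode;

    typedef _Atomic(McsNode *) McsLock;        /* tail of the waiter queue */

    static void mcs_lock(McsLock *lock, McsNode *node)
    {
        McsNode *prev;

        atomic_store(&node->next, NULL);
        atomic_store(&node->locked, true);
        prev = atomic_exchange(lock, node);    /* enqueue at the tail */
        if (prev) {
            atomic_store(&prev->next, node);   /* link behind predecessor */
            while (atomic_load(&node->locked)) {
                /* spin on our own node only: no cache-line bouncing */
            }
        }
    }

    static void mcs_unlock(McsLock *lock, McsNode *node)
    {
        McsNode *next = atomic_load(&node->next);

        if (!next) {
            McsNode *expected = node;
            if (atomic_compare_exchange_strong(lock, &expected, NULL)) {
                return;                        /* no successor, lock is free */
            }
            while (!(next = atomic_load(&node->next))) {
                /* successor swapped the tail but has not linked in yet */
            }
        }
        atomic_store(&next->locked, false);    /* hand off to successor */
    }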
10 | |||
11 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
12 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
13 | Message-id: 20170213181244.16297-4-pbonzini@redhat.com | ||
14 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
15 | --- | ||
16 | tests/test-aio-multithread.c | 164 +++++++++++++++++++++++++++++++++++++++++++ | ||
17 | 1 file changed, 164 insertions(+) | ||
18 | |||
19 | diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c | ||
20 | index XXXXXXX..XXXXXXX 100644 | ||
21 | --- a/tests/test-aio-multithread.c | ||
22 | +++ b/tests/test-aio-multithread.c | ||
23 | @@ -XXX,XX +XXX,XX @@ static void test_multi_co_mutex_2_30(void) | ||
24 | test_multi_co_mutex(2, 30); | ||
25 | } | ||
26 | |||
27 | +/* Same test with fair mutexes, for performance comparison. */ | ||
28 | + | ||
29 | +#ifdef CONFIG_LINUX | ||
30 | +#include "qemu/futex.h" | ||
31 | + | ||
32 | +/* The nodes for the mutex reside in this structure (on which we try to avoid | ||
33 | + * false sharing). The head of the mutex is in the "mutex_head" variable. | ||
34 | + */ | ||
35 | +static struct { | ||
36 | + int next, locked; | ||
37 | + int padding[14]; | ||
38 | +} nodes[NUM_CONTEXTS] __attribute__((__aligned__(64))); | ||
39 | + | ||
40 | +static int mutex_head = -1; | ||
41 | + | ||
42 | +static void mcs_mutex_lock(void) | ||
43 | +{ | ||
44 | + int prev; | ||
45 | + | ||
46 | + nodes[id].next = -1; | ||
47 | + nodes[id].locked = 1; | ||
48 | + prev = atomic_xchg(&mutex_head, id); | ||
49 | + if (prev != -1) { | ||
50 | + atomic_set(&nodes[prev].next, id); | ||
51 | + qemu_futex_wait(&nodes[id].locked, 1); | ||
52 | + } | ||
53 | +} | ||
54 | + | ||
55 | +static void mcs_mutex_unlock(void) | ||
56 | +{ | ||
57 | + int next; | ||
58 | + if (nodes[id].next == -1) { | ||
59 | + if (atomic_read(&mutex_head) == id && | ||
60 | + atomic_cmpxchg(&mutex_head, id, -1) == id) { | ||
61 | + /* Last item in the list, exit. */ | ||
62 | + return; | ||
63 | + } | ||
64 | + while (atomic_read(&nodes[id].next) == -1) { | ||
65 | + /* mcs_mutex_lock did the xchg, but has not updated | ||
66 | + * nodes[prev].next yet. | ||
67 | + */ | ||
68 | + } | ||
69 | + } | ||
70 | + | ||
71 | + /* Wake up the next in line. */ | ||
72 | + next = nodes[id].next; | ||
73 | + nodes[next].locked = 0; | ||
74 | + qemu_futex_wake(&nodes[next].locked, 1); | ||
75 | +} | ||
76 | + | ||
77 | +static void test_multi_fair_mutex_entry(void *opaque) | ||
78 | +{ | ||
79 | + while (!atomic_mb_read(&now_stopping)) { | ||
80 | + mcs_mutex_lock(); | ||
81 | + counter++; | ||
82 | + mcs_mutex_unlock(); | ||
83 | + atomic_inc(&atomic_counter); | ||
84 | + } | ||
85 | + atomic_dec(&running); | ||
86 | +} | ||
87 | + | ||
88 | +static void test_multi_fair_mutex(int threads, int seconds) | ||
89 | +{ | ||
90 | + int i; | ||
91 | + | ||
92 | + assert(mutex_head == -1); | ||
93 | + counter = 0; | ||
94 | + atomic_counter = 0; | ||
95 | + now_stopping = false; | ||
96 | + | ||
97 | + create_aio_contexts(); | ||
98 | + assert(threads <= NUM_CONTEXTS); | ||
99 | + running = threads; | ||
100 | + for (i = 0; i < threads; i++) { | ||
101 | + Coroutine *co1 = qemu_coroutine_create(test_multi_fair_mutex_entry, NULL); | ||
102 | + aio_co_schedule(ctx[i], co1); | ||
103 | + } | ||
104 | + | ||
105 | + g_usleep(seconds * 1000000); | ||
106 | + | ||
107 | + atomic_mb_set(&now_stopping, true); | ||
108 | + while (running > 0) { | ||
109 | + g_usleep(100000); | ||
110 | + } | ||
111 | + | ||
112 | + join_aio_contexts(); | ||
113 | + g_test_message("%d iterations/second\n", counter / seconds); | ||
114 | + g_assert_cmpint(counter, ==, atomic_counter); | ||
115 | +} | ||
116 | + | ||
117 | +static void test_multi_fair_mutex_1(void) | ||
118 | +{ | ||
119 | + test_multi_fair_mutex(NUM_CONTEXTS, 1); | ||
120 | +} | ||
121 | + | ||
122 | +static void test_multi_fair_mutex_10(void) | ||
123 | +{ | ||
124 | + test_multi_fair_mutex(NUM_CONTEXTS, 10); | ||
125 | +} | ||
126 | +#endif | ||
127 | + | ||
128 | +/* Same test with pthread mutexes, for performance comparison and | ||
129 | + * portability. */ | ||
130 | + | ||
131 | +static QemuMutex mutex; | ||
132 | + | ||
133 | +static void test_multi_mutex_entry(void *opaque) | ||
134 | +{ | ||
135 | + while (!atomic_mb_read(&now_stopping)) { | ||
136 | + qemu_mutex_lock(&mutex); | ||
137 | + counter++; | ||
138 | + qemu_mutex_unlock(&mutex); | ||
139 | + atomic_inc(&atomic_counter); | ||
140 | + } | ||
141 | + atomic_dec(&running); | ||
142 | +} | ||
143 | + | ||
144 | +static void test_multi_mutex(int threads, int seconds) | ||
145 | +{ | ||
146 | + int i; | ||
147 | + | ||
148 | + qemu_mutex_init(&mutex); | ||
149 | + counter = 0; | ||
150 | + atomic_counter = 0; | ||
151 | + now_stopping = false; | ||
152 | + | ||
153 | + create_aio_contexts(); | ||
154 | + assert(threads <= NUM_CONTEXTS); | ||
155 | + running = threads; | ||
156 | + for (i = 0; i < threads; i++) { | ||
157 | + Coroutine *co1 = qemu_coroutine_create(test_multi_mutex_entry, NULL); | ||
158 | + aio_co_schedule(ctx[i], co1); | ||
159 | + } | ||
160 | + | ||
161 | + g_usleep(seconds * 1000000); | ||
162 | + | ||
163 | + atomic_mb_set(&now_stopping, true); | ||
164 | + while (running > 0) { | ||
165 | + g_usleep(100000); | ||
166 | + } | ||
167 | + | ||
168 | + join_aio_contexts(); | ||
169 | + g_test_message("%d iterations/second\n", counter / seconds); | ||
170 | + g_assert_cmpint(counter, ==, atomic_counter); | ||
171 | +} | ||
172 | + | ||
173 | +static void test_multi_mutex_1(void) | ||
174 | +{ | ||
175 | + test_multi_mutex(NUM_CONTEXTS, 1); | ||
176 | +} | ||
177 | + | ||
178 | +static void test_multi_mutex_10(void) | ||
179 | +{ | ||
180 | + test_multi_mutex(NUM_CONTEXTS, 10); | ||
181 | +} | ||
182 | + | ||
183 | /* End of tests. */ | ||
184 | |||
185 | int main(int argc, char **argv) | ||
186 | @@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv) | ||
187 | g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1); | ||
188 | g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_1); | ||
189 | g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3); | ||
190 | +#ifdef CONFIG_LINUX | ||
191 | + g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_1); | ||
192 | +#endif | ||
193 | + g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_1); | ||
194 | } else { | ||
195 | g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10); | ||
196 | g_test_add_func("/aio/multi/mutex/contended", test_multi_co_mutex_10); | ||
197 | g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30); | ||
198 | +#ifdef CONFIG_LINUX | ||
199 | + g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_10); | ||
200 | +#endif | ||
201 | + g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_10); | ||
202 | } | ||
203 | return g_test_run(); | ||
204 | } | ||
205 | -- | ||
206 | 2.9.3 | ||
207 | |||
New patch | |||
---|---|---|---|
1 | From: Paolo Bonzini <pbonzini@redhat.com> | ||
1 | 2 | ||
3 | This will avoid forward references in the next patch. It is also | ||
4 | more logical because CoQueue is no longer the basic primitive. | ||
5 | |||
6 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
7 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
8 | Message-id: 20170213181244.16297-5-pbonzini@redhat.com | ||
9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
10 | --- | ||
11 | include/qemu/coroutine.h | 89 ++++++++++++++++++++++++------------------------ | ||
12 | 1 file changed, 44 insertions(+), 45 deletions(-) | ||
13 | |||
14 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h | ||
15 | index XXXXXXX..XXXXXXX 100644 | ||
16 | --- a/include/qemu/coroutine.h | ||
17 | +++ b/include/qemu/coroutine.h | ||
18 | @@ -XXX,XX +XXX,XX @@ bool qemu_in_coroutine(void); | ||
19 | */ | ||
20 | bool qemu_coroutine_entered(Coroutine *co); | ||
21 | |||
22 | - | ||
23 | -/** | ||
24 | - * CoQueues are a mechanism to queue coroutines in order to continue executing | ||
25 | - * them later. They provide the fundamental primitives on which coroutine locks | ||
26 | - * are built. | ||
27 | - */ | ||
28 | -typedef struct CoQueue { | ||
29 | - QSIMPLEQ_HEAD(, Coroutine) entries; | ||
30 | -} CoQueue; | ||
31 | - | ||
32 | -/** | ||
33 | - * Initialise a CoQueue. This must be called before any other operation is used | ||
34 | - * on the CoQueue. | ||
35 | - */ | ||
36 | -void qemu_co_queue_init(CoQueue *queue); | ||
37 | - | ||
38 | -/** | ||
39 | - * Adds the current coroutine to the CoQueue and transfers control to the | ||
40 | - * caller of the coroutine. | ||
41 | - */ | ||
42 | -void coroutine_fn qemu_co_queue_wait(CoQueue *queue); | ||
43 | - | ||
44 | -/** | ||
45 | - * Restarts the next coroutine in the CoQueue and removes it from the queue. | ||
46 | - * | ||
47 | - * Returns true if a coroutine was restarted, false if the queue is empty. | ||
48 | - */ | ||
49 | -bool coroutine_fn qemu_co_queue_next(CoQueue *queue); | ||
50 | - | ||
51 | -/** | ||
52 | - * Restarts all coroutines in the CoQueue and leaves the queue empty. | ||
53 | - */ | ||
54 | -void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue); | ||
55 | - | ||
56 | -/** | ||
57 | - * Enter the next coroutine in the queue | ||
58 | - */ | ||
59 | -bool qemu_co_enter_next(CoQueue *queue); | ||
60 | - | ||
61 | -/** | ||
62 | - * Checks if the CoQueue is empty. | ||
63 | - */ | ||
64 | -bool qemu_co_queue_empty(CoQueue *queue); | ||
65 | - | ||
66 | - | ||
67 | /** | ||
68 | * Provides a mutex that can be used to synchronise coroutines | ||
69 | */ | ||
70 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex); | ||
71 | */ | ||
72 | void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex); | ||
73 | |||
74 | + | ||
75 | +/** | ||
76 | + * CoQueues are a mechanism to queue coroutines in order to continue executing | ||
77 | + * them later. | ||
78 | + */ | ||
79 | +typedef struct CoQueue { | ||
80 | + QSIMPLEQ_HEAD(, Coroutine) entries; | ||
81 | +} CoQueue; | ||
82 | + | ||
83 | +/** | ||
84 | + * Initialise a CoQueue. This must be called before any other operation is used | ||
85 | + * on the CoQueue. | ||
86 | + */ | ||
87 | +void qemu_co_queue_init(CoQueue *queue); | ||
88 | + | ||
89 | +/** | ||
90 | + * Adds the current coroutine to the CoQueue and transfers control to the | ||
91 | + * caller of the coroutine. | ||
92 | + */ | ||
93 | +void coroutine_fn qemu_co_queue_wait(CoQueue *queue); | ||
94 | + | ||
95 | +/** | ||
96 | + * Restarts the next coroutine in the CoQueue and removes it from the queue. | ||
97 | + * | ||
98 | + * Returns true if a coroutine was restarted, false if the queue is empty. | ||
99 | + */ | ||
100 | +bool coroutine_fn qemu_co_queue_next(CoQueue *queue); | ||
101 | + | ||
102 | +/** | ||
103 | + * Restarts all coroutines in the CoQueue and leaves the queue empty. | ||
104 | + */ | ||
105 | +void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue); | ||
106 | + | ||
107 | +/** | ||
108 | + * Enter the next coroutine in the queue | ||
109 | + */ | ||
110 | +bool qemu_co_enter_next(CoQueue *queue); | ||
111 | + | ||
112 | +/** | ||
113 | + * Checks if the CoQueue is empty. | ||
114 | + */ | ||
115 | +bool qemu_co_queue_empty(CoQueue *queue); | ||
116 | + | ||
117 | + | ||
118 | typedef struct CoRwlock { | ||
119 | bool writer; | ||
120 | int reader; | ||
121 | -- | ||
122 | 2.9.3 | ||
123 | |||
124 | diff view generated by jsdifflib |
1 | From: Jeuk Kim <jeuk20.kim@samsung.com> | 1 | From: Paolo Bonzini <pbonzini@redhat.com> |
---|---|---|---|
2 | 2 | ||
3 | This commit adds support for the UFS logical unit. | 3 | All that CoQueue needs in order to become thread-safe is help
4 | The LU handles processing of SCSI commands and | 4 | from an external mutex. Add this to the API.
5 | unit descriptor query requests. | 5 |
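For the CoQueue patch on the right, a hedged sketch of the condition-variable style that the new mutex argument enables (the struct and names are invented; the real conversions, such as qcow2's, appear in the diff below). Both the queue and the mutex are assumed to be initialised with qemu_co_queue_init()/qemu_co_mutex_init():

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    typedef struct Example {
        CoMutex lock;      /* protects @ready */
        CoQueue waiters;
        bool ready;
    } Example;

    static void coroutine_fn wait_until_ready(Example *s)
    {
        qemu_co_mutex_lock(&s->lock);
        while (!s->ready) {
            /* drops s->lock while sleeping, retakes it on wakeup */
            qemu_co_queue_wait(&s->waiters, &s->lock);
        }
        /* s->ready is true and s->lock is held here */
        qemu_co_mutex_unlock(&s->lock);
    }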
6 | 6 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | |
7 | This commit enables the UFS device to process | 7 | Reviewed-by: Fam Zheng <famz@redhat.com> |
8 | I/O requests. | 8 | Message-id: 20170213181244.16297-6-pbonzini@redhat.com
9 | |||
10 | Signed-off-by: Jeuk Kim <jeuk20.kim@samsung.com> | ||
11 | Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
12 | Message-id: beacc504376ab6a14b1a3830bb3c69382cf6aebc.1693980783.git.jeuk20.kim@gmail.com | ||
13 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | 9 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> |
14 | --- | 10 | --- |
15 | hw/ufs/ufs.h | 43 ++ | 11 | include/qemu/coroutine.h | 8 +++++--- |
16 | include/scsi/constants.h | 1 + | 12 | block/backup.c | 2 +- |
17 | hw/ufs/lu.c | 1445 ++++++++++++++++++++++++++++++++++++++ | 13 | block/io.c | 4 ++-- |
18 | hw/ufs/ufs.c | 252 ++++++- | 14 | block/nbd-client.c | 2 +- |
19 | hw/ufs/meson.build | 2 +- | 15 | block/qcow2-cluster.c | 4 +--- |
20 | hw/ufs/trace-events | 25 + | 16 | block/sheepdog.c | 2 +- |
21 | 6 files changed, 1761 insertions(+), 7 deletions(-) | 17 | block/throttle-groups.c | 2 +- |
22 | create mode 100644 hw/ufs/lu.c | 18 | hw/9pfs/9p.c | 2 +- |
23 | 19 | util/qemu-coroutine-lock.c | 24 +++++++++++++++++++++--- | |
24 | diff --git a/hw/ufs/ufs.h b/hw/ufs/ufs.h | 20 | 9 files changed, 34 insertions(+), 16 deletions(-) |
25 | index XXXXXXX..XXXXXXX 100644 | 21 | |
26 | --- a/hw/ufs/ufs.h | 22 | diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h |
27 | +++ b/hw/ufs/ufs.h | 23 | index XXXXXXX..XXXXXXX 100644 |
28 | @@ -XXX,XX +XXX,XX @@ | 24 | --- a/include/qemu/coroutine.h |
29 | #define UFS_MAX_LUS 32 | 25 | +++ b/include/qemu/coroutine.h |
30 | #define UFS_BLOCK_SIZE 4096 | 26 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex); |
31 | 27 | ||
32 | +typedef struct UfsBusClass { | 28 | /** |
33 | + BusClass parent_class; | 29 | * CoQueues are a mechanism to queue coroutines in order to continue executing |
34 | + bool (*parent_check_address)(BusState *bus, DeviceState *dev, Error **errp); | 30 | - * them later. |
35 | +} UfsBusClass; | 31 | + * them later. They are similar to condition variables, but they need help |
32 | + * from an external mutex in order to maintain thread-safety. | ||
33 | */ | ||
34 | typedef struct CoQueue { | ||
35 | QSIMPLEQ_HEAD(, Coroutine) entries; | ||
36 | @@ -XXX,XX +XXX,XX @@ void qemu_co_queue_init(CoQueue *queue); | ||
37 | |||
38 | /** | ||
39 | * Adds the current coroutine to the CoQueue and transfers control to the | ||
40 | - * caller of the coroutine. | ||
41 | + * caller of the coroutine. The mutex is unlocked during the wait and | ||
42 | + * locked again afterwards. | ||
43 | */ | ||
44 | -void coroutine_fn qemu_co_queue_wait(CoQueue *queue); | ||
45 | +void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex); | ||
46 | |||
47 | /** | ||
48 | * Restarts the next coroutine in the CoQueue and removes it from the queue. | ||
49 | diff --git a/block/backup.c b/block/backup.c | ||
50 | index XXXXXXX..XXXXXXX 100644 | ||
51 | --- a/block/backup.c | ||
52 | +++ b/block/backup.c | ||
53 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job, | ||
54 | retry = false; | ||
55 | QLIST_FOREACH(req, &job->inflight_reqs, list) { | ||
56 | if (end > req->start && start < req->end) { | ||
57 | - qemu_co_queue_wait(&req->wait_queue); | ||
58 | + qemu_co_queue_wait(&req->wait_queue, NULL); | ||
59 | retry = true; | ||
60 | break; | ||
61 | } | ||
62 | diff --git a/block/io.c b/block/io.c | ||
63 | index XXXXXXX..XXXXXXX 100644 | ||
64 | --- a/block/io.c | ||
65 | +++ b/block/io.c | ||
66 | @@ -XXX,XX +XXX,XX @@ static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self) | ||
67 | * (instead of producing a deadlock in the former case). */ | ||
68 | if (!req->waiting_for) { | ||
69 | self->waiting_for = req; | ||
70 | - qemu_co_queue_wait(&req->wait_queue); | ||
71 | + qemu_co_queue_wait(&req->wait_queue, NULL); | ||
72 | self->waiting_for = NULL; | ||
73 | retry = true; | ||
74 | waited = true; | ||
75 | @@ -XXX,XX +XXX,XX @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs) | ||
76 | |||
77 | /* Wait until any previous flushes are completed */ | ||
78 | while (bs->active_flush_req) { | ||
79 | - qemu_co_queue_wait(&bs->flush_queue); | ||
80 | + qemu_co_queue_wait(&bs->flush_queue, NULL); | ||
81 | } | ||
82 | |||
83 | bs->active_flush_req = true; | ||
84 | diff --git a/block/nbd-client.c b/block/nbd-client.c | ||
85 | index XXXXXXX..XXXXXXX 100644 | ||
86 | --- a/block/nbd-client.c | ||
87 | +++ b/block/nbd-client.c | ||
88 | @@ -XXX,XX +XXX,XX @@ static void nbd_coroutine_start(NBDClientSession *s, | ||
89 | /* Poor man semaphore. The free_sema is locked when no other request | ||
90 | * can be accepted, and unlocked after receiving one reply. */ | ||
91 | if (s->in_flight == MAX_NBD_REQUESTS) { | ||
92 | - qemu_co_queue_wait(&s->free_sema); | ||
93 | + qemu_co_queue_wait(&s->free_sema, NULL); | ||
94 | assert(s->in_flight < MAX_NBD_REQUESTS); | ||
95 | } | ||
96 | s->in_flight++; | ||
97 | diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c | ||
98 | index XXXXXXX..XXXXXXX 100644 | ||
99 | --- a/block/qcow2-cluster.c | ||
100 | +++ b/block/qcow2-cluster.c | ||
101 | @@ -XXX,XX +XXX,XX @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, | ||
102 | if (bytes == 0) { | ||
103 | /* Wait for the dependency to complete. We need to recheck | ||
104 | * the free/allocated clusters when we continue. */ | ||
105 | - qemu_co_mutex_unlock(&s->lock); | ||
106 | - qemu_co_queue_wait(&old_alloc->dependent_requests); | ||
107 | - qemu_co_mutex_lock(&s->lock); | ||
108 | + qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); | ||
109 | return -EAGAIN; | ||
110 | } | ||
111 | } | ||
112 | diff --git a/block/sheepdog.c b/block/sheepdog.c | ||
113 | index XXXXXXX..XXXXXXX 100644 | ||
114 | --- a/block/sheepdog.c | ||
115 | +++ b/block/sheepdog.c | ||
116 | @@ -XXX,XX +XXX,XX @@ static void wait_for_overlapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *acb) | ||
117 | retry: | ||
118 | QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) { | ||
119 | if (AIOCBOverlapping(acb, cb)) { | ||
120 | - qemu_co_queue_wait(&s->overlapping_queue); | ||
121 | + qemu_co_queue_wait(&s->overlapping_queue, NULL); | ||
122 | goto retry; | ||
123 | } | ||
124 | } | ||
125 | diff --git a/block/throttle-groups.c b/block/throttle-groups.c | ||
126 | index XXXXXXX..XXXXXXX 100644 | ||
127 | --- a/block/throttle-groups.c | ||
128 | +++ b/block/throttle-groups.c | ||
129 | @@ -XXX,XX +XXX,XX @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk, | ||
130 | if (must_wait || blkp->pending_reqs[is_write]) { | ||
131 | blkp->pending_reqs[is_write]++; | ||
132 | qemu_mutex_unlock(&tg->lock); | ||
133 | - qemu_co_queue_wait(&blkp->throttled_reqs[is_write]); | ||
134 | + qemu_co_queue_wait(&blkp->throttled_reqs[is_write], NULL); | ||
135 | qemu_mutex_lock(&tg->lock); | ||
136 | blkp->pending_reqs[is_write]--; | ||
137 | } | ||
138 | diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c | ||
139 | index XXXXXXX..XXXXXXX 100644 | ||
140 | --- a/hw/9pfs/9p.c | ||
141 | +++ b/hw/9pfs/9p.c | ||
142 | @@ -XXX,XX +XXX,XX @@ static void coroutine_fn v9fs_flush(void *opaque) | ||
143 | /* | ||
144 | * Wait for pdu to complete. | ||
145 | */ | ||
146 | - qemu_co_queue_wait(&cancel_pdu->complete); | ||
147 | + qemu_co_queue_wait(&cancel_pdu->complete, NULL); | ||
148 | cancel_pdu->cancelled = 0; | ||
149 | pdu_free(cancel_pdu); | ||
150 | } | ||
151 | diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c | ||
152 | index XXXXXXX..XXXXXXX 100644 | ||
153 | --- a/util/qemu-coroutine-lock.c | ||
154 | +++ b/util/qemu-coroutine-lock.c | ||
155 | @@ -XXX,XX +XXX,XX @@ void qemu_co_queue_init(CoQueue *queue) | ||
156 | QSIMPLEQ_INIT(&queue->entries); | ||
157 | } | ||
158 | |||
159 | -void coroutine_fn qemu_co_queue_wait(CoQueue *queue) | ||
160 | +void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex) | ||
161 | { | ||
162 | Coroutine *self = qemu_coroutine_self(); | ||
163 | QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next); | ||
36 | + | 164 | + |
37 | +typedef struct UfsBus { | 165 | + if (mutex) { |
38 | + SCSIBus parent_bus; | 166 | + qemu_co_mutex_unlock(mutex); |
39 | +} UfsBus; | ||
40 | + | ||
41 | +#define TYPE_UFS_BUS "ufs-bus" | ||
42 | +DECLARE_OBJ_CHECKERS(UfsBus, UfsBusClass, UFS_BUS, TYPE_UFS_BUS) | ||
43 | + | ||
44 | typedef enum UfsRequestState { | ||
45 | UFS_REQUEST_IDLE = 0, | ||
46 | UFS_REQUEST_READY = 1, | ||
47 | @@ -XXX,XX +XXX,XX @@ typedef enum UfsRequestState { | ||
48 | typedef enum UfsReqResult { | ||
49 | UFS_REQUEST_SUCCESS = 0, | ||
50 | UFS_REQUEST_FAIL = 1, | ||
51 | + UFS_REQUEST_NO_COMPLETE = 2, | ||
52 | } UfsReqResult; | ||
53 | |||
54 | typedef struct UfsRequest { | ||
55 | @@ -XXX,XX +XXX,XX @@ typedef struct UfsRequest { | ||
56 | QEMUSGList *sg; | ||
57 | } UfsRequest; | ||
58 | |||
59 | +typedef struct UfsLu { | ||
60 | + SCSIDevice qdev; | ||
61 | + uint8_t lun; | ||
62 | + UnitDescriptor unit_desc; | ||
63 | +} UfsLu; | ||
64 | + | ||
65 | +typedef struct UfsWLu { | ||
66 | + SCSIDevice qdev; | ||
67 | + uint8_t lun; | ||
68 | +} UfsWLu; | ||
69 | + | ||
70 | typedef struct UfsParams { | ||
71 | char *serial; | ||
72 | uint8_t nutrs; /* Number of UTP Transfer Request Slots */ | ||
73 | @@ -XXX,XX +XXX,XX @@ typedef struct UfsParams { | ||
74 | |||
75 | typedef struct UfsHc { | ||
76 | PCIDevice parent_obj; | ||
77 | + UfsBus bus; | ||
78 | MemoryRegion iomem; | ||
79 | UfsReg reg; | ||
80 | UfsParams params; | ||
81 | uint32_t reg_size; | ||
82 | UfsRequest *req_list; | ||
83 | |||
84 | + UfsLu *lus[UFS_MAX_LUS]; | ||
85 | + UfsWLu *report_wlu; | ||
86 | + UfsWLu *dev_wlu; | ||
87 | + UfsWLu *boot_wlu; | ||
88 | + UfsWLu *rpmb_wlu; | ||
89 | DeviceDescriptor device_desc; | ||
90 | GeometryDescriptor geometry_desc; | ||
91 | Attributes attributes; | ||
92 | @@ -XXX,XX +XXX,XX @@ typedef struct UfsHc { | ||
93 | #define TYPE_UFS "ufs" | ||
94 | #define UFS(obj) OBJECT_CHECK(UfsHc, (obj), TYPE_UFS) | ||
95 | |||
96 | +#define TYPE_UFS_LU "ufs-lu" | ||
97 | +#define UFSLU(obj) OBJECT_CHECK(UfsLu, (obj), TYPE_UFS_LU) | ||
98 | + | ||
99 | +#define TYPE_UFS_WLU "ufs-wlu" | ||
100 | +#define UFSWLU(obj) OBJECT_CHECK(UfsWLu, (obj), TYPE_UFS_WLU) | ||
101 | + | ||
102 | typedef enum UfsQueryFlagPerm { | ||
103 | UFS_QUERY_FLAG_NONE = 0x0, | ||
104 | UFS_QUERY_FLAG_READ = 0x1, | ||
105 | @@ -XXX,XX +XXX,XX @@ typedef enum UfsQueryAttrPerm { | ||
106 | UFS_QUERY_ATTR_WRITE = 0x2, | ||
107 | } UfsQueryAttrPerm; | ||
108 | |||
109 | +static inline bool is_wlun(uint8_t lun) | ||
110 | +{ | ||
111 | + return (lun == UFS_UPIU_REPORT_LUNS_WLUN || | ||
112 | + lun == UFS_UPIU_UFS_DEVICE_WLUN || lun == UFS_UPIU_BOOT_WLUN || | ||
113 | + lun == UFS_UPIU_RPMB_WLUN); | ||
114 | +} | ||
115 | + | ||
116 | #endif /* HW_UFS_UFS_H */ | ||
117 | diff --git a/include/scsi/constants.h b/include/scsi/constants.h | ||
118 | index XXXXXXX..XXXXXXX 100644 | ||
119 | --- a/include/scsi/constants.h | ||
120 | +++ b/include/scsi/constants.h | ||
121 | @@ -XXX,XX +XXX,XX @@ | ||
122 | #define MODE_PAGE_FLEXIBLE_DISK_GEOMETRY 0x05 | ||
123 | #define MODE_PAGE_CACHING 0x08 | ||
124 | #define MODE_PAGE_AUDIO_CTL 0x0e | ||
125 | +#define MODE_PAGE_CONTROL 0x0a | ||
126 | #define MODE_PAGE_POWER 0x1a | ||
127 | #define MODE_PAGE_FAULT_FAIL 0x1c | ||
128 | #define MODE_PAGE_TO_PROTECT 0x1d | ||
129 | diff --git a/hw/ufs/lu.c b/hw/ufs/lu.c | ||
130 | new file mode 100644 | ||
131 | index XXXXXXX..XXXXXXX | ||
132 | --- /dev/null | ||
133 | +++ b/hw/ufs/lu.c | ||
134 | @@ -XXX,XX +XXX,XX @@ | ||
135 | +/* | ||
136 | + * QEMU UFS Logical Unit | ||
137 | + * | ||
138 | + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved. | ||
139 | + * | ||
140 | + * Written by Jeuk Kim <jeuk20.kim@samsung.com> | ||
141 | + * | ||
142 | + * This code is licensed under the GNU GPL v2 or later. | ||
143 | + */ | ||
144 | + | ||
145 | +#include "qemu/osdep.h" | ||
146 | +#include "qemu/units.h" | ||
147 | +#include "qapi/error.h" | ||
148 | +#include "qemu/memalign.h" | ||
149 | +#include "hw/scsi/scsi.h" | ||
150 | +#include "scsi/constants.h" | ||
151 | +#include "sysemu/block-backend.h" | ||
152 | +#include "qemu/cutils.h" | ||
153 | +#include "trace.h" | ||
154 | +#include "ufs.h" | ||
155 | + | ||
156 | +/* | ||
157 | + * The code below handling SCSI commands is copied from hw/scsi/scsi-disk.c, | ||
158 | + * with minor adjustments to make it work for UFS. | ||
159 | + */ | ||
160 | + | ||
161 | +#define SCSI_DMA_BUF_SIZE (128 * KiB) | ||
162 | +#define SCSI_MAX_INQUIRY_LEN 256 | ||
163 | +#define SCSI_INQUIRY_DATA_SIZE 36 | ||
164 | +#define SCSI_MAX_MODE_LEN 256 | ||
165 | + | ||
166 | +typedef struct UfsSCSIReq { | ||
167 | + SCSIRequest req; | ||
168 | + /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */ | ||
169 | + uint64_t sector; | ||
170 | + uint32_t sector_count; | ||
171 | + uint32_t buflen; | ||
172 | + bool started; | ||
173 | + bool need_fua_emulation; | ||
174 | + struct iovec iov; | ||
175 | + QEMUIOVector qiov; | ||
176 | + BlockAcctCookie acct; | ||
177 | +} UfsSCSIReq; | ||
178 | + | ||
179 | +static void ufs_scsi_free_request(SCSIRequest *req) | ||
180 | +{ | ||
181 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
182 | + | ||
183 | + qemu_vfree(r->iov.iov_base); | ||
184 | +} | ||
185 | + | ||
186 | +static void scsi_check_condition(UfsSCSIReq *r, SCSISense sense) | ||
187 | +{ | ||
188 | + trace_ufs_scsi_check_condition(r->req.tag, sense.key, sense.asc, | ||
189 | + sense.ascq); | ||
190 | + scsi_req_build_sense(&r->req, sense); | ||
191 | + scsi_req_complete(&r->req, CHECK_CONDITION); | ||
192 | +} | ||
193 | + | ||
194 | +static int ufs_scsi_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf, | ||
195 | + uint32_t outbuf_len) | ||
196 | +{ | ||
197 | + UfsHc *u = UFS(req->bus->qbus.parent); | ||
198 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev); | ||
199 | + uint8_t page_code = req->cmd.buf[2]; | ||
200 | + int start, buflen = 0; | ||
201 | + | ||
202 | + if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) { | ||
203 | + return -1; | ||
204 | + } | 167 | + } |
205 | + | 168 | + |
206 | + outbuf[buflen++] = lu->qdev.type & 0x1f; | 169 | + /* There is no race condition here. Other threads will call |
207 | + outbuf[buflen++] = page_code; | 170 | + * aio_co_schedule on our AioContext, which can reenter this |
208 | + outbuf[buflen++] = 0x00; | 171 | + * coroutine but only after this yield and after the main loop |
209 | + outbuf[buflen++] = 0x00; | 172 | + * has gone through the next iteration. |
210 | + start = buflen; | 173 | + */ |
174 | qemu_coroutine_yield(); | ||
175 | assert(qemu_in_coroutine()); | ||
211 | + | 176 | + |
212 | + switch (page_code) { | 177 | + /* TODO: OSv implements wait morphing here, where the wakeup |
213 | + case 0x00: /* Supported page codes, mandatory */ | 178 | + * primitive automatically places the woken coroutine on the |
214 | + { | 179 | + * mutex's queue. This avoids the thundering herd effect. |
215 | + trace_ufs_scsi_emulate_vpd_page_00(req->cmd.xfer); | ||
216 | + outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ | ||
217 | + if (u->params.serial) { | ||
218 | + outbuf[buflen++] = 0x80; /* unit serial number */ | ||
219 | + } | ||
220 | + outbuf[buflen++] = 0x87; /* mode page policy */ | ||
221 | + break; | ||
222 | + } | ||
223 | + case 0x80: /* Device serial number, optional */ | ||
224 | + { | ||
225 | + int l; | ||
226 | + | ||
227 | + if (!u->params.serial) { | ||
228 | + trace_ufs_scsi_emulate_vpd_page_80_not_supported(); | ||
229 | + return -1; | ||
230 | + } | ||
231 | + | ||
232 | + l = strlen(u->params.serial); | ||
233 | + if (l > SCSI_INQUIRY_DATA_SIZE) { | ||
234 | + l = SCSI_INQUIRY_DATA_SIZE; | ||
235 | + } | ||
236 | + | ||
237 | + trace_ufs_scsi_emulate_vpd_page_80(req->cmd.xfer); | ||
238 | + memcpy(outbuf + buflen, u->params.serial, l); | ||
239 | + buflen += l; | ||
240 | + break; | ||
241 | + } | ||
242 | + case 0x87: /* Mode Page Policy, mandatory */ | ||
243 | + { | ||
244 | + trace_ufs_scsi_emulate_vpd_page_87(req->cmd.xfer); | ||
245 | + outbuf[buflen++] = 0x3f; /* apply to all mode pages and subpages */ | ||
246 | + outbuf[buflen++] = 0xff; | ||
247 | + outbuf[buflen++] = 0; /* shared */ | ||
248 | + outbuf[buflen++] = 0; | ||
249 | + break; | ||
250 | + } | ||
251 | + default: | ||
252 | + return -1; | ||
253 | + } | ||
254 | + /* done with EVPD */ | ||
255 | + assert(buflen - start <= 255); | ||
256 | + outbuf[start - 1] = buflen - start; | ||
257 | + return buflen; | ||
258 | +} | ||
259 | + | ||
260 | +static int ufs_scsi_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf, | ||
261 | + uint32_t outbuf_len) | ||
262 | +{ | ||
263 | + int buflen = 0; | ||
264 | + | ||
265 | + if (outbuf_len < SCSI_INQUIRY_DATA_SIZE) { | ||
266 | + return -1; | ||
267 | + } | ||
268 | + | ||
269 | + if (req->cmd.buf[1] & 0x1) { | ||
270 | + /* Vital product data */ | ||
271 | + return ufs_scsi_emulate_vpd_page(req, outbuf, outbuf_len); | ||
272 | + } | ||
273 | + | ||
274 | + /* Standard INQUIRY data */ | ||
275 | + if (req->cmd.buf[2] != 0) { | ||
276 | + return -1; | ||
277 | + } | ||
278 | + | ||
279 | + /* PAGE CODE == 0 */ | ||
280 | + buflen = req->cmd.xfer; | ||
281 | + if (buflen > SCSI_MAX_INQUIRY_LEN) { | ||
282 | + buflen = SCSI_MAX_INQUIRY_LEN; | ||
283 | + } | ||
284 | + | ||
285 | + if (is_wlun(req->lun)) { | ||
286 | + outbuf[0] = TYPE_WLUN; | ||
287 | + } else { | ||
288 | + outbuf[0] = 0; | ||
289 | + } | ||
290 | + outbuf[1] = 0; | ||
291 | + | ||
292 | + strpadcpy((char *)&outbuf[16], 16, "QEMU UFS", ' '); | ||
293 | + strpadcpy((char *)&outbuf[8], 8, "QEMU", ' '); | ||
294 | + | ||
295 | + memset(&outbuf[32], 0, 4); | ||
296 | + | ||
297 | + outbuf[2] = 0x06; /* SPC-4 */ | ||
298 | + outbuf[3] = 0x2; | ||
299 | + | ||
300 | + if (buflen > SCSI_INQUIRY_DATA_SIZE) { | ||
301 | + outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ | ||
302 | + } else { | ||
303 | + /* | ||
304 | + * If the allocation length of CDB is too small, the additional | ||
305 | + * length is not adjusted | ||
306 | + */ | ||
307 | + outbuf[4] = SCSI_INQUIRY_DATA_SIZE - 5; | ||
308 | + } | ||
309 | + | ||
310 | + /* Support TCQ. */ | ||
311 | + outbuf[7] = req->bus->info->tcq ? 0x02 : 0; | ||
312 | + return buflen; | ||
313 | +} | ||
314 | + | ||
315 | +static int mode_sense_page(UfsLu *lu, int page, uint8_t **p_outbuf, | ||
316 | + int page_control) | ||
317 | +{ | ||
318 | + static const int mode_sense_valid[0x3f] = { | ||
319 | + [MODE_PAGE_CACHING] = 1, | ||
320 | + [MODE_PAGE_R_W_ERROR] = 1, | ||
321 | + [MODE_PAGE_CONTROL] = 1, | ||
322 | + }; | ||
323 | + | ||
324 | + uint8_t *p = *p_outbuf + 2; | ||
325 | + int length; | ||
326 | + | ||
327 | + assert(page < ARRAY_SIZE(mode_sense_valid)); | ||
328 | + if ((mode_sense_valid[page]) == 0) { | ||
329 | + return -1; | ||
330 | + } | ||
331 | + | ||
332 | + /* | ||
333 | + * If Changeable Values are requested, a mask denoting those mode parameters | ||
334 | + * that are changeable shall be returned. As we currently don't support | ||
335 | + * parameter changes via MODE_SELECT all bits are returned set to zero. | ||
336 | + * The buffer was already memset to zero by the caller of this function. | ||
337 | + */ | 180 | + */ |
338 | + switch (page) { | 181 | + if (mutex) { |
339 | + case MODE_PAGE_CACHING: | 182 | + qemu_co_mutex_lock(mutex); |
340 | + length = 0x12; | ||
341 | + if (page_control == 1 || /* Changeable Values */ | ||
342 | + blk_enable_write_cache(lu->qdev.conf.blk)) { | ||
343 | + p[0] = 4; /* WCE */ | ||
344 | + } | ||
345 | + break; | ||
346 | + | ||
347 | + case MODE_PAGE_R_W_ERROR: | ||
348 | + length = 10; | ||
349 | + if (page_control == 1) { /* Changeable Values */ | ||
350 | + break; | ||
351 | + } | ||
352 | + p[0] = 0x80; /* Automatic Write Reallocation Enabled */ | ||
353 | + break; | ||
354 | + | ||
355 | + case MODE_PAGE_CONTROL: | ||
356 | + length = 10; | ||
357 | + if (page_control == 1) { /* Changeable Values */ | ||
358 | + break; | ||
359 | + } | ||
360 | + p[1] = 0x10; /* Queue Algorithm modifier */ | ||
361 | + p[8] = 0xff; /* Busy Timeout Period */ | ||
362 | + p[9] = 0xff; | ||
363 | + break; | ||
364 | + | ||
365 | + default: | ||
366 | + return -1; | ||
367 | + } | ||
368 | + | ||
369 | + assert(length < 256); | ||
370 | + (*p_outbuf)[0] = page; | ||
371 | + (*p_outbuf)[1] = length; | ||
372 | + *p_outbuf += length + 2; | ||
373 | + return length + 2; | ||
374 | +} | ||
375 | + | ||
376 | +static int ufs_scsi_emulate_mode_sense(UfsSCSIReq *r, uint8_t *outbuf) | ||
377 | +{ | ||
378 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
379 | + bool dbd; | ||
380 | + int page, buflen, ret, page_control; | ||
381 | + uint8_t *p; | ||
382 | + uint8_t dev_specific_param = 0; | ||
383 | + | ||
384 | + dbd = (r->req.cmd.buf[1] & 0x8) != 0; | ||
385 | + if (!dbd) { | ||
386 | + return -1; | ||
387 | + } | ||
388 | + | ||
389 | + page = r->req.cmd.buf[2] & 0x3f; | ||
390 | + page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; | ||
391 | + | ||
392 | + trace_ufs_scsi_emulate_mode_sense( | ||
393 | + (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, | ||
394 | + page, r->req.cmd.xfer, page_control); | ||
395 | + memset(outbuf, 0, r->req.cmd.xfer); | ||
396 | + p = outbuf; | ||
397 | + | ||
398 | + if (!blk_is_writable(lu->qdev.conf.blk)) { | ||
399 | + dev_specific_param |= 0x80; /* Readonly. */ | ||
400 | + } | ||
401 | + | ||
402 | + p[2] = 0; /* Medium type. */ | ||
403 | + p[3] = dev_specific_param; | ||
404 | + p[6] = p[7] = 0; /* Block descriptor length. */ | ||
405 | + p += 8; | ||
406 | + | ||
407 | + if (page_control == 3) { | ||
408 | + /* Saved Values */ | ||
409 | + scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); | ||
410 | + return -1; | ||
411 | + } | ||
412 | + | ||
413 | + if (page == MODE_PAGE_ALLS) { | ||
414 | + for (page = 0; page <= 0x3e; page++) { | ||
415 | + mode_sense_page(lu, page, &p, page_control); | ||
416 | + } | ||
417 | + } else { | ||
418 | + ret = mode_sense_page(lu, page, &p, page_control); | ||
419 | + if (ret == -1) { | ||
420 | + return -1; | ||
421 | + } | ||
422 | + } | ||
423 | + | ||
424 | + buflen = p - outbuf; | ||
425 | + /* | ||
426 | + * The mode data length field specifies the length in bytes of the | ||
427 | + * following data that is available to be transferred. The mode data | ||
428 | + * length does not include itself. | ||
429 | + */ | ||
430 | + outbuf[0] = ((buflen - 2) >> 8) & 0xff; | ||
431 | + outbuf[1] = (buflen - 2) & 0xff; | ||
432 | + return buflen; | ||
433 | +} | ||
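For reference, the reply built above for a MODE SENSE(10) of the caching page consists of the 8-byte mode parameter header, a 2-byte page header, and the 0x12 payload bytes that mode_sense_page() emits; the MODE DATA LENGTH field excludes its own two bytes and is stored big-endian. A small runnable sanity check of that encoding (the buffer contents are illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 8-byte header + 2-byte page header + 0x12 payload bytes = 28 total */
        uint8_t outbuf[28] = { 0 };
        int buflen = sizeof(outbuf);

        /* MODE DATA LENGTH excludes its own two bytes, stored big-endian */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;

        assert(((outbuf[0] << 8) | outbuf[1]) == 26);
        assert(outbuf[6] == 0 && outbuf[7] == 0); /* no block descriptors */
        return 0;
    }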
434 | + | ||
435 | +/* | ||
436 | + * scsi_handle_rw_error has two return values. False means that the error | ||
437 | + * must be ignored, true means that the error has been processed and the | ||
438 | + * caller should not do anything else for this request. Note that | ||
439 | + * scsi_handle_rw_error always manages its reference counts, independent | ||
440 | + * of the return value. | ||
441 | + */ | ||
442 | +static bool scsi_handle_rw_error(UfsSCSIReq *r, int ret, bool acct_failed) | ||
443 | +{ | ||
444 | + bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); | ||
445 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
446 | + SCSISense sense = SENSE_CODE(NO_SENSE); | ||
447 | + int error = 0; | ||
448 | + bool req_has_sense = false; | ||
449 | + BlockErrorAction action; | ||
450 | + int status; | ||
451 | + | ||
452 | + if (ret < 0) { | ||
453 | + status = scsi_sense_from_errno(-ret, &sense); | ||
454 | + error = -ret; | ||
455 | + } else { | ||
456 | + /* A passthrough command has completed with nonzero status. */ | ||
457 | + status = ret; | ||
458 | + if (status == CHECK_CONDITION) { | ||
459 | + req_has_sense = true; | ||
460 | + error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); | ||
461 | + } else { | ||
462 | + error = EINVAL; | ||
463 | + } | ||
464 | + } | ||
465 | + | ||
466 | + /* | ||
467 | + * Check whether the error has to be handled by the guest or should | ||
468 | + * rather follow the rerror=/werror= settings. Guest-handled errors | ||
469 | + * are usually retried immediately, so do not post them to QMP and | ||
470 | + * do not account them as failed I/O. | ||
471 | + */ | ||
472 | + if (req_has_sense && scsi_sense_buf_is_guest_recoverable( | ||
473 | + r->req.sense, sizeof(r->req.sense))) { | ||
474 | + action = BLOCK_ERROR_ACTION_REPORT; | ||
475 | + acct_failed = false; | ||
476 | + } else { | ||
477 | + action = blk_get_error_action(lu->qdev.conf.blk, is_read, error); | ||
478 | + blk_error_action(lu->qdev.conf.blk, action, is_read, error); | ||
479 | + } | ||
480 | + | ||
481 | + switch (action) { | ||
482 | + case BLOCK_ERROR_ACTION_REPORT: | ||
483 | + if (acct_failed) { | ||
484 | + block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
485 | + } | ||
486 | + if (!req_has_sense && status == CHECK_CONDITION) { | ||
487 | + scsi_req_build_sense(&r->req, sense); | ||
488 | + } | ||
489 | + scsi_req_complete(&r->req, status); | ||
490 | + return true; | ||
491 | + | ||
492 | + case BLOCK_ERROR_ACTION_IGNORE: | ||
493 | + return false; | ||
494 | + | ||
495 | + case BLOCK_ERROR_ACTION_STOP: | ||
496 | + scsi_req_retry(&r->req); | ||
497 | + return true; | ||
498 | + | ||
499 | + default: | ||
500 | + g_assert_not_reached(); | ||
501 | + } | ||
502 | +} | ||
503 | + | ||
504 | +static bool ufs_scsi_req_check_error(UfsSCSIReq *r, int ret, bool acct_failed) | ||
505 | +{ | ||
506 | + if (r->req.io_canceled) { | ||
507 | + scsi_req_cancel_complete(&r->req); | ||
508 | + return true; | ||
509 | + } | ||
510 | + | ||
511 | + if (ret < 0) { | ||
512 | + return scsi_handle_rw_error(r, ret, acct_failed); | ||
513 | + } | ||
514 | + | ||
515 | + return false; | ||
516 | +} | ||
517 | + | ||
518 | +static void scsi_aio_complete(void *opaque, int ret) | ||
519 | +{ | ||
520 | + UfsSCSIReq *r = (UfsSCSIReq *)opaque; | ||
521 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
522 | + | ||
523 | + assert(r->req.aiocb != NULL); | ||
524 | + r->req.aiocb = NULL; | ||
525 | + aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); | ||
526 | + if (ufs_scsi_req_check_error(r, ret, true)) { | ||
527 | + goto done; | ||
528 | + } | ||
529 | + | ||
530 | + block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
531 | + scsi_req_complete(&r->req, GOOD); | ||
532 | + | ||
533 | +done: | ||
534 | + aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); | ||
535 | + scsi_req_unref(&r->req); | ||
536 | +} | ||
537 | + | ||
538 | +static int32_t ufs_scsi_emulate_command(SCSIRequest *req, uint8_t *buf) | ||
539 | +{ | ||
540 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
541 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev); | ||
542 | + uint32_t last_block = 0; | ||
543 | + uint8_t *outbuf; | ||
544 | + int buflen; | ||
545 | + | ||
546 | + switch (req->cmd.buf[0]) { | ||
547 | + case INQUIRY: | ||
548 | + case MODE_SENSE_10: | ||
549 | + case START_STOP: | ||
550 | + case REQUEST_SENSE: | ||
551 | + break; | ||
552 | + | ||
553 | + default: | ||
554 | + if (!blk_is_available(lu->qdev.conf.blk)) { | ||
555 | + scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); | ||
556 | + return 0; | ||
557 | + } | ||
558 | + break; | ||
559 | + } | ||
560 | + | ||
561 | + /* | ||
562 | + * FIXME: we shouldn't return anything bigger than 4k, but the code | ||
563 | + * requires the buffer to be as big as req->cmd.xfer in several | ||
564 | + * places. So, do not allow CDBs with a very large ALLOCATION | ||
565 | + * LENGTH. The real fix would be to modify scsi_read_data and | ||
566 | + * dma_buf_read, so that they return data beyond the buflen | ||
567 | + * as all zeros. | ||
568 | + */ | ||
569 | + if (req->cmd.xfer > 65536) { | ||
570 | + goto illegal_request; | ||
571 | + } | ||
572 | + r->buflen = MAX(4096, req->cmd.xfer); | ||
573 | + | ||
574 | + if (!r->iov.iov_base) { | ||
575 | + r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen); | ||
576 | + } | ||
577 | + | ||
578 | + outbuf = r->iov.iov_base; | ||
579 | + memset(outbuf, 0, r->buflen); | ||
580 | + switch (req->cmd.buf[0]) { | ||
581 | + case TEST_UNIT_READY: | ||
582 | + assert(blk_is_available(lu->qdev.conf.blk)); | ||
583 | + break; | ||
584 | + case INQUIRY: | ||
585 | + buflen = ufs_scsi_emulate_inquiry(req, outbuf, r->buflen); | ||
586 | + if (buflen < 0) { | ||
587 | + goto illegal_request; | ||
588 | + } | ||
589 | + break; | ||
590 | + case MODE_SENSE_10: | ||
591 | + buflen = ufs_scsi_emulate_mode_sense(r, outbuf); | ||
592 | + if (buflen < 0) { | ||
593 | + goto illegal_request; | ||
594 | + } | ||
595 | + break; | ||
596 | + case READ_CAPACITY_10: | ||
597 | + /* The normal LEN field for this command is zero. */ | ||
598 | + memset(outbuf, 0, 8); | ||
599 | + if (lu->qdev.max_lba > 0) { | ||
600 | + last_block = lu->qdev.max_lba - 1; | ||
601 | + } | ||
602 | + outbuf[0] = (last_block >> 24) & 0xff; | ||
603 | + outbuf[1] = (last_block >> 16) & 0xff; | ||
604 | + outbuf[2] = (last_block >> 8) & 0xff; | ||
605 | + outbuf[3] = last_block & 0xff; | ||
606 | + outbuf[4] = (lu->qdev.blocksize >> 24) & 0xff; | ||
607 | + outbuf[5] = (lu->qdev.blocksize >> 16) & 0xff; | ||
608 | + outbuf[6] = (lu->qdev.blocksize >> 8) & 0xff; | ||
609 | + outbuf[7] = lu->qdev.blocksize & 0xff; | ||
610 | + break; | ||
611 | + case REQUEST_SENSE: | ||
612 | + /* Just return "NO SENSE". */ | ||
613 | + buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, | ||
614 | + (req->cmd.buf[1] & 1) == 0); | ||
615 | + if (buflen < 0) { | ||
616 | + goto illegal_request; | ||
617 | + } | ||
618 | + break; | ||
619 | + case SYNCHRONIZE_CACHE: | ||
620 | + /* The request is used as the AIO opaque value, so add a ref. */ | ||
621 | + scsi_req_ref(&r->req); | ||
622 | + block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, | ||
623 | + BLOCK_ACCT_FLUSH); | ||
624 | + r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r); | ||
625 | + return 0; | ||
626 | + case VERIFY_10: | ||
627 | + trace_ufs_scsi_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); | ||
628 | + if (req->cmd.buf[1] & 6) { | ||
629 | + goto illegal_request; | ||
630 | + } | ||
631 | + break; | ||
632 | + case SERVICE_ACTION_IN_16: | ||
633 | + /* Service Action In subcommands. */ | ||
634 | + if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { | ||
635 | + trace_ufs_scsi_emulate_command_SAI_16(); | ||
636 | + memset(outbuf, 0, req->cmd.xfer); | ||
637 | + | ||
638 | + if (lu->qdev.max_lba > 0) { | ||
639 | + last_block = lu->qdev.max_lba - 1; | ||
640 | + } | ||
641 | + outbuf[0] = 0; | ||
642 | + outbuf[1] = 0; | ||
643 | + outbuf[2] = 0; | ||
644 | + outbuf[3] = 0; | ||
645 | + outbuf[4] = (last_block >> 24) & 0xff; | ||
646 | + outbuf[5] = (last_block >> 16) & 0xff; | ||
647 | + outbuf[6] = (last_block >> 8) & 0xff; | ||
648 | + outbuf[7] = last_block & 0xff; | ||
649 | + outbuf[8] = (lu->qdev.blocksize >> 24) & 0xff; | ||
650 | + outbuf[9] = (lu->qdev.blocksize >> 16) & 0xff; | ||
651 | + outbuf[10] = (lu->qdev.blocksize >> 8) & 0xff; | ||
652 | + outbuf[11] = lu->qdev.blocksize & 0xff; | ||
653 | + outbuf[12] = 0; | ||
654 | + outbuf[13] = get_physical_block_exp(&lu->qdev.conf); | ||
655 | + | ||
656 | + if (lu->unit_desc.provisioning_type == 2 || | ||
657 | + lu->unit_desc.provisioning_type == 3) { | ||
658 | + outbuf[14] = 0x80; | ||
659 | + } | ||
660 | + /* Protection, exponent and lowest lba field left blank. */ | ||
661 | + break; | ||
662 | + } | ||
663 | + trace_ufs_scsi_emulate_command_SAI_unsupported(); | ||
664 | + goto illegal_request; | ||
665 | + case MODE_SELECT_10: | ||
666 | + trace_ufs_scsi_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); | ||
667 | + break; | ||
668 | + case START_STOP: | ||
669 | + /* | ||
670 | + * TODO: START_STOP is not yet implemented. It always returns success. | ||
671 | + * Revisit it when ufs power management is implemented. | ||
672 | + */ | ||
673 | + trace_ufs_scsi_emulate_command_START_STOP(); | ||
674 | + break; | ||
675 | + case FORMAT_UNIT: | ||
676 | + trace_ufs_scsi_emulate_command_FORMAT_UNIT(); | ||
677 | + break; | ||
678 | + case SEND_DIAGNOSTIC: | ||
679 | + trace_ufs_scsi_emulate_command_SEND_DIAGNOSTIC(); | ||
680 | + break; | ||
681 | + default: | ||
682 | + trace_ufs_scsi_emulate_command_UNKNOWN(buf[0], | ||
683 | + scsi_command_name(buf[0])); | ||
684 | + scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); | ||
685 | + return 0; | ||
686 | + } | ||
687 | + assert(!r->req.aiocb); | ||
688 | + r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); | ||
689 | + if (r->iov.iov_len == 0) { | ||
690 | + scsi_req_complete(&r->req, GOOD); | ||
691 | + } | ||
692 | + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { | ||
693 | + assert(r->iov.iov_len == req->cmd.xfer); | ||
694 | + return -r->iov.iov_len; | ||
695 | + } else { | ||
696 | + return r->iov.iov_len; | ||
697 | + } | ||
698 | + | ||
699 | +illegal_request: | ||
700 | + if (r->req.status == -1) { | ||
701 | + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); | ||
702 | + } | ||
703 | + return 0; | ||
704 | +} | ||
705 | + | ||
706 | +static void ufs_scsi_emulate_read_data(SCSIRequest *req) | ||
707 | +{ | ||
708 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
709 | + int buflen = r->iov.iov_len; | ||
710 | + | ||
711 | + if (buflen) { | ||
712 | + trace_ufs_scsi_emulate_read_data(buflen); | ||
713 | + r->iov.iov_len = 0; | ||
714 | + r->started = true; | ||
715 | + scsi_req_data(&r->req, buflen); | ||
716 | + return; | ||
717 | + } | ||
718 | + | ||
719 | + /* This also clears the sense buffer for REQUEST SENSE. */ | ||
720 | + scsi_req_complete(&r->req, GOOD); | ||
721 | +} | ||
722 | + | ||
723 | +static int ufs_scsi_check_mode_select(UfsLu *lu, int page, uint8_t *inbuf, | ||
724 | + int inlen) | ||
725 | +{ | ||
726 | + uint8_t mode_current[SCSI_MAX_MODE_LEN]; | ||
727 | + uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; | ||
728 | + uint8_t *p; | ||
729 | + int len, expected_len, changeable_len, i; | ||
730 | + | ||
731 | + /* | ||
732 | + * The input buffer does not include the page header, so it is | ||
733 | + * off by 2 bytes. | ||
734 | + */ | ||
735 | + expected_len = inlen + 2; | ||
736 | + if (expected_len > SCSI_MAX_MODE_LEN) { | ||
737 | + return -1; | ||
738 | + } | ||
739 | + | ||
740 | + /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */ | ||
741 | + if (page == MODE_PAGE_ALLS) { | ||
742 | + return -1; | ||
743 | + } | ||
744 | + | ||
745 | + p = mode_current; | ||
746 | + memset(mode_current, 0, inlen + 2); | ||
747 | + len = mode_sense_page(lu, page, &p, 0); | ||
748 | + if (len < 0 || len != expected_len) { | ||
749 | + return -1; | ||
750 | + } | ||
751 | + | ||
752 | + p = mode_changeable; | ||
753 | + memset(mode_changeable, 0, inlen + 2); | ||
754 | + changeable_len = mode_sense_page(lu, page, &p, 1); | ||
755 | + assert(changeable_len == len); | ||
756 | + | ||
757 | + /* | ||
758 | + * Check that unchangeable bits are the same as what MODE SENSE | ||
759 | + * would return. | ||
760 | + */ | ||
761 | + for (i = 2; i < len; i++) { | ||
762 | + if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { | ||
763 | + return -1; | ||
764 | + } | ||
765 | + } | ||
766 | + return 0; | ||
767 | +} | ||
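The XOR/mask test above is easiest to see with concrete bytes. In the caching page, the changeable mask returned by mode_sense_page() has only WCE (bit 2) set, so a guest may flip WCE but nothing else. A worked sketch (the helper name and values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Accept a guest byte iff it differs from the current value only in
     * bits that the changeable mask marks as writable. */
    static int mode_byte_ok(uint8_t current, uint8_t guest, uint8_t changeable)
    {
        return ((current ^ guest) & ~changeable) == 0;
    }

    int main(void)
    {
        assert(mode_byte_ok(0x04, 0x00, 0x04));  /* flipping WCE: allowed */
        assert(!mode_byte_ok(0x04, 0x05, 0x04)); /* touching bit 0: rejected */
        return 0;
    }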
768 | + | ||
769 | +static void ufs_scsi_apply_mode_select(UfsLu *lu, int page, uint8_t *p) | ||
770 | +{ | ||
771 | + switch (page) { | ||
772 | + case MODE_PAGE_CACHING: | ||
773 | + blk_set_enable_write_cache(lu->qdev.conf.blk, (p[0] & 4) != 0); | ||
774 | + break; | ||
775 | + | ||
776 | + default: | ||
777 | + break; | ||
778 | + } | ||
779 | +} | ||
780 | + | ||
781 | +static int mode_select_pages(UfsSCSIReq *r, uint8_t *p, int len, bool change) | ||
782 | +{ | ||
783 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
784 | + | ||
785 | + while (len > 0) { | ||
786 | + int page, page_len; | ||
787 | + | ||
788 | + page = p[0] & 0x3f; | ||
789 | + if (p[0] & 0x40) { | ||
790 | + goto invalid_param; | ||
791 | + } else { | ||
792 | + if (len < 2) { | ||
793 | + goto invalid_param_len; | ||
794 | + } | ||
795 | + page_len = p[1]; | ||
796 | + p += 2; | ||
797 | + len -= 2; | ||
798 | + } | ||
799 | + | ||
800 | + if (page_len > len) { | ||
801 | + goto invalid_param_len; | ||
802 | + } | ||
803 | + | ||
804 | + if (!change) { | ||
805 | + if (ufs_scsi_check_mode_select(lu, page, p, page_len) < 0) { | ||
806 | + goto invalid_param; | ||
807 | + } | ||
808 | + } else { | ||
809 | + ufs_scsi_apply_mode_select(lu, page, p); | ||
810 | + } | ||
811 | + | ||
812 | + p += page_len; | ||
813 | + len -= page_len; | ||
814 | + } | ||
815 | + return 0; | ||
816 | + | ||
817 | +invalid_param: | ||
818 | + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); | ||
819 | + return -1; | ||
820 | + | ||
821 | +invalid_param_len: | ||
822 | + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); | ||
823 | + return -1; | ||
824 | +} | ||
825 | + | ||
826 | +static void ufs_scsi_emulate_mode_select(UfsSCSIReq *r, uint8_t *inbuf) | ||
827 | +{ | ||
828 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
829 | + uint8_t *p = inbuf; | ||
830 | + int len = r->req.cmd.xfer; | ||
831 | + int hdr_len = 8; | ||
832 | + int bd_len; | ||
833 | + int pass; | ||
834 | + | ||
835 | + /* We only support PF=1, SP=0. */ | ||
836 | + if ((r->req.cmd.buf[1] & 0x11) != 0x10) { | ||
837 | + goto invalid_field; | ||
838 | + } | ||
839 | + | ||
840 | + if (len < hdr_len) { | ||
841 | + goto invalid_param_len; | ||
842 | + } | ||
843 | + | ||
844 | + bd_len = lduw_be_p(&p[6]); | ||
845 | + if (bd_len != 0) { | ||
846 | + goto invalid_param; | ||
847 | + } | ||
848 | + | ||
849 | + len -= hdr_len; | ||
850 | + p += hdr_len; | ||
851 | + | ||
852 | + /* Ensure no change is made if there is an error! */ | ||
853 | + for (pass = 0; pass < 2; pass++) { | ||
854 | + if (mode_select_pages(r, p, len, pass == 1) < 0) { | ||
855 | + assert(pass == 0); | ||
856 | + return; | ||
857 | + } | ||
858 | + } | ||
859 | + | ||
860 | + if (!blk_enable_write_cache(lu->qdev.conf.blk)) { | ||
861 | + /* The request is used as the AIO opaque value, so add a ref. */ | ||
862 | + scsi_req_ref(&r->req); | ||
863 | + block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, | ||
864 | + BLOCK_ACCT_FLUSH); | ||
865 | + r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r); | ||
866 | + return; | ||
867 | + } | ||
868 | + | ||
869 | + scsi_req_complete(&r->req, GOOD); | ||
870 | + return; | ||
871 | + | ||
872 | +invalid_param: | ||
873 | + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); | ||
874 | + return; | ||
875 | + | ||
876 | +invalid_param_len: | ||
877 | + scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); | ||
878 | + return; | ||
879 | + | ||
880 | +invalid_field: | ||
881 | + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); | ||
882 | +} | ||
883 | + | ||
884 | +/* block_num and nb_blocks are expected to be in units of the qdev blocksize */ | ||
885 | +static inline bool check_lba_range(UfsLu *lu, uint64_t block_num, | ||
886 | + uint32_t nb_blocks) | ||
887 | +{ | ||
888 | + /* | ||
889 | + * The first line tests that no overflow happens when computing the last | ||
890 | + * block. The second line tests that the last accessed block is in | ||
891 | + * range. | ||
892 | + * | ||
893 | + * Careful, the computations should not underflow for nb_blocks == 0, | ||
894 | + * and a 0-block read to the first LBA beyond the end of device is | ||
895 | + * valid. | ||
896 | + */ | ||
897 | + return (block_num <= block_num + nb_blocks && | ||
898 | + block_num + nb_blocks <= lu->qdev.max_lba + 1); | ||
899 | +} | ||
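A few edge cases that the two comparisons above are designed to handle, as a standalone sketch (the device size is arbitrary):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool check_range(uint64_t max_lba, uint64_t block_num,
                            uint32_t nb_blocks)
    {
        return block_num <= block_num + nb_blocks &&
               block_num + nb_blocks <= max_lba + 1;
    }

    int main(void)
    {
        uint64_t max_lba = 999; /* 1000-block device, LBAs 0..999 */

        assert(check_range(max_lba, 999, 1));         /* last block is in range */
        assert(check_range(max_lba, 1000, 0));        /* 0-block read just past end */
        assert(!check_range(max_lba, 999, 2));        /* runs past the end */
        assert(!check_range(max_lba, UINT64_MAX, 1)); /* wraparound caught by test 1 */
        return 0;
    }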
900 | + | ||
901 | +static void ufs_scsi_emulate_write_data(SCSIRequest *req) | ||
902 | +{ | ||
903 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
904 | + | ||
905 | + if (r->iov.iov_len) { | ||
906 | + int buflen = r->iov.iov_len; | ||
907 | + trace_ufs_scsi_emulate_write_data(buflen); | ||
908 | + r->iov.iov_len = 0; | ||
909 | + scsi_req_data(&r->req, buflen); | ||
910 | + return; | ||
911 | + } | ||
912 | + | ||
913 | + switch (req->cmd.buf[0]) { | ||
914 | + case MODE_SELECT_10: | ||
915 | + /* This also clears the sense buffer for REQUEST SENSE. */ | ||
916 | + ufs_scsi_emulate_mode_select(r, r->iov.iov_base); | ||
917 | + break; | ||
918 | + default: | ||
919 | + abort(); | ||
920 | + } | ||
921 | +} | ||
922 | + | ||
923 | +/* Return a pointer to the data buffer. */ | ||
924 | +static uint8_t *ufs_scsi_get_buf(SCSIRequest *req) | ||
925 | +{ | ||
926 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
927 | + | ||
928 | + return (uint8_t *)r->iov.iov_base; | ||
929 | +} | ||
930 | + | ||
931 | +static int32_t ufs_scsi_dma_command(SCSIRequest *req, uint8_t *buf) | ||
932 | +{ | ||
933 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
934 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, req->dev); | ||
935 | + uint32_t len; | ||
936 | + uint8_t command; | ||
937 | + | ||
938 | + command = buf[0]; | ||
939 | + | ||
940 | + if (!blk_is_available(lu->qdev.conf.blk)) { | ||
941 | + scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); | ||
942 | + return 0; | ||
943 | + } | ||
944 | + | ||
945 | + len = scsi_data_cdb_xfer(r->req.cmd.buf); | ||
946 | + switch (command) { | ||
947 | + case READ_6: | ||
948 | + case READ_10: | ||
949 | + trace_ufs_scsi_dma_command_READ(r->req.cmd.lba, len); | ||
950 | + if (r->req.cmd.buf[1] & 0xe0) { | ||
951 | + goto illegal_request; | ||
952 | + } | ||
953 | + if (!check_lba_range(lu, r->req.cmd.lba, len)) { | ||
954 | + goto illegal_lba; | ||
955 | + } | ||
956 | + r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); | ||
957 | + r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); | ||
958 | + break; | ||
959 | + case WRITE_6: | ||
960 | + case WRITE_10: | ||
961 | + trace_ufs_scsi_dma_command_WRITE(r->req.cmd.lba, len); | ||
962 | + if (!blk_is_writable(lu->qdev.conf.blk)) { | ||
963 | + scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); | ||
964 | + return 0; | ||
965 | + } | ||
966 | + if (r->req.cmd.buf[1] & 0xe0) { | ||
967 | + goto illegal_request; | ||
968 | + } | ||
969 | + if (!check_lba_range(lu, r->req.cmd.lba, len)) { | ||
970 | + goto illegal_lba; | ||
971 | + } | ||
972 | + r->sector = r->req.cmd.lba * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); | ||
973 | + r->sector_count = len * (lu->qdev.blocksize / BDRV_SECTOR_SIZE); | ||
974 | + break; | ||
975 | + default: | ||
976 | + abort(); | ||
977 | + illegal_request: | ||
978 | + scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); | ||
979 | + return 0; | ||
980 | + illegal_lba: | ||
981 | + scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); | ||
982 | + return 0; | ||
983 | + } | ||
984 | + r->need_fua_emulation = ((r->req.cmd.buf[1] & 8) != 0); | ||
985 | + if (r->sector_count == 0) { | ||
986 | + scsi_req_complete(&r->req, GOOD); | ||
987 | + } | ||
988 | + assert(r->iov.iov_len == 0); | ||
989 | + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { | ||
990 | + return -r->sector_count * BDRV_SECTOR_SIZE; | ||
991 | + } else { | ||
992 | + return r->sector_count * BDRV_SECTOR_SIZE; | ||
993 | + } | ||
994 | +} | ||
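The LBA-to-sector conversions above scale by blocksize / BDRV_SECTOR_SIZE. Assuming the 4096-byte UFS block size that ufs_lu_realize() assigns and QEMU's 512-byte BDRV sectors, the factor is 8; a quick standalone check (the block-size value is an assumption here, since the constant's definition is not shown in this hunk):

    #include <assert.h>
    #include <stdint.h>

    #define BDRV_SECTOR_SIZE 512ULL
    #define UFS_BLOCK_SIZE   4096ULL /* assumed value of the QEMU constant */

    int main(void)
    {
        uint64_t lba = 100, len = 2; /* illustrative CDB values */
        uint64_t sector = lba * (UFS_BLOCK_SIZE / BDRV_SECTOR_SIZE);
        uint64_t count  = len * (UFS_BLOCK_SIZE / BDRV_SECTOR_SIZE);

        assert(sector == 800 && count == 16);
        return 0;
    }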
995 | + | ||
996 | +static void scsi_write_do_fua(UfsSCSIReq *r) | ||
997 | +{ | ||
998 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
999 | + | ||
1000 | + assert(r->req.aiocb == NULL); | ||
1001 | + assert(!r->req.io_canceled); | ||
1002 | + | ||
1003 | + if (r->need_fua_emulation) { | ||
1004 | + block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, | ||
1005 | + BLOCK_ACCT_FLUSH); | ||
1006 | + r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_aio_complete, r); | ||
1007 | + return; | ||
1008 | + } | ||
1009 | + | ||
1010 | + scsi_req_complete(&r->req, GOOD); | ||
1011 | + scsi_req_unref(&r->req); | ||
1012 | +} | ||
1013 | + | ||
1014 | +static void scsi_dma_complete_noio(UfsSCSIReq *r, int ret) | ||
1015 | +{ | ||
1016 | + assert(r->req.aiocb == NULL); | ||
1017 | + if (ufs_scsi_req_check_error(r, ret, false)) { | ||
1018 | + goto done; | ||
1019 | + } | ||
1020 | + | ||
1021 | + r->sector += r->sector_count; | ||
1022 | + r->sector_count = 0; | ||
1023 | + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { | ||
1024 | + scsi_write_do_fua(r); | ||
1025 | + return; | ||
1026 | + } else { | ||
1027 | + scsi_req_complete(&r->req, GOOD); | ||
1028 | + } | ||
1029 | + | ||
1030 | +done: | ||
1031 | + scsi_req_unref(&r->req); | ||
1032 | +} | ||
1033 | + | ||
1034 | +static void scsi_dma_complete(void *opaque, int ret) | ||
1035 | +{ | ||
1036 | + UfsSCSIReq *r = (UfsSCSIReq *)opaque; | ||
1037 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1038 | + | ||
1039 | + assert(r->req.aiocb != NULL); | ||
1040 | + r->req.aiocb = NULL; | ||
1041 | + | ||
1042 | + aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1043 | + if (ret < 0) { | ||
1044 | + block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1045 | + } else { | ||
1046 | + block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1047 | + } | ||
1048 | + scsi_dma_complete_noio(r, ret); | ||
1049 | + aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1050 | +} | ||
1051 | + | ||
1052 | +static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, | ||
1053 | + BlockCompletionFunc *cb, void *cb_opaque, | ||
1054 | + void *opaque) | ||
1055 | +{ | ||
1056 | + UfsSCSIReq *r = opaque; | ||
1057 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1058 | + return blk_aio_preadv(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); | ||
1059 | +} | ||
1060 | + | ||
1061 | +static void scsi_init_iovec(UfsSCSIReq *r, size_t size) | ||
1062 | +{ | ||
1063 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1064 | + | ||
1065 | + if (!r->iov.iov_base) { | ||
1066 | + r->buflen = size; | ||
1067 | + r->iov.iov_base = blk_blockalign(lu->qdev.conf.blk, r->buflen); | ||
1068 | + } | ||
1069 | + r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen); | ||
1070 | + qemu_iovec_init_external(&r->qiov, &r->iov, 1); | ||
1071 | +} | ||
1072 | + | ||
1073 | +static void scsi_read_complete_noio(UfsSCSIReq *r, int ret) | ||
1074 | +{ | ||
1075 | + uint32_t n; | ||
1076 | + | ||
1077 | + assert(r->req.aiocb == NULL); | ||
1078 | + if (ufs_scsi_req_check_error(r, ret, false)) { | ||
1079 | + goto done; | ||
1080 | + } | ||
1081 | + | ||
1082 | + n = r->qiov.size / BDRV_SECTOR_SIZE; | ||
1083 | + r->sector += n; | ||
1084 | + r->sector_count -= n; | ||
1085 | + scsi_req_data(&r->req, r->qiov.size); | ||
1086 | + | ||
1087 | +done: | ||
1088 | + scsi_req_unref(&r->req); | ||
1089 | +} | ||
1090 | + | ||
1091 | +static void scsi_read_complete(void *opaque, int ret) | ||
1092 | +{ | ||
1093 | + UfsSCSIReq *r = (UfsSCSIReq *)opaque; | ||
1094 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1095 | + | ||
1096 | + assert(r->req.aiocb != NULL); | ||
1097 | + r->req.aiocb = NULL; | ||
1098 | + trace_ufs_scsi_read_data_count(r->sector_count); | ||
1099 | + aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1100 | + if (ret < 0) { | ||
1101 | + block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1102 | + } else { | ||
1103 | + block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1104 | + trace_ufs_scsi_read_complete(r->req.tag, r->qiov.size); | ||
1105 | + } | ||
1106 | + scsi_read_complete_noio(r, ret); | ||
1107 | + aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1108 | +} | ||
1109 | + | ||
1110 | +/* Actually issue a read to the block device. */ | ||
1111 | +static void scsi_do_read(UfsSCSIReq *r, int ret) | ||
1112 | +{ | ||
1113 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1114 | + | ||
1115 | + assert(r->req.aiocb == NULL); | ||
1116 | + if (ufs_scsi_req_check_error(r, ret, false)) { | ||
1117 | + goto done; | ||
1118 | + } | ||
1119 | + | ||
1120 | + /* The request is used as the AIO opaque value, so add a ref. */ | ||
1121 | + scsi_req_ref(&r->req); | ||
1122 | + | ||
1123 | + if (r->req.sg) { | ||
1124 | + dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); | ||
1125 | + r->req.residual -= r->req.sg->size; | ||
1126 | + r->req.aiocb = dma_blk_io( | ||
1127 | + blk_get_aio_context(lu->qdev.conf.blk), r->req.sg, | ||
1128 | + r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_readv, r, | ||
1129 | + scsi_dma_complete, r, DMA_DIRECTION_FROM_DEVICE); | ||
1130 | + } else { | ||
1131 | + scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); | ||
1132 | + block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, | ||
1133 | + r->qiov.size, BLOCK_ACCT_READ); | ||
1134 | + r->req.aiocb = scsi_dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, | ||
1135 | + scsi_read_complete, r, r); | ||
1136 | + } | ||
1137 | + | ||
1138 | +done: | ||
1139 | + scsi_req_unref(&r->req); | ||
1140 | +} | ||
1141 | + | ||
1142 | +static void scsi_do_read_cb(void *opaque, int ret) | ||
1143 | +{ | ||
1144 | + UfsSCSIReq *r = (UfsSCSIReq *)opaque; | ||
1145 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1146 | + | ||
1147 | + assert(r->req.aiocb != NULL); | ||
1148 | + r->req.aiocb = NULL; | ||
1149 | + | ||
1150 | + aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1151 | + if (ret < 0) { | ||
1152 | + block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1153 | + } else { | ||
1154 | + block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1155 | + } | ||
1156 | + scsi_do_read(opaque, ret); | ||
1157 | + aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1158 | +} | ||
1159 | + | ||
1160 | +/* Read more data from scsi device into buffer. */ | ||
1161 | +static void scsi_read_data(SCSIRequest *req) | ||
1162 | +{ | ||
1163 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
1164 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1165 | + bool first; | ||
1166 | + | ||
1167 | + trace_ufs_scsi_read_data_count(r->sector_count); | ||
1168 | + if (r->sector_count == 0) { | ||
1169 | + /* This also clears the sense buffer for REQUEST SENSE. */ | ||
1170 | + scsi_req_complete(&r->req, GOOD); | ||
1171 | + return; | ||
1172 | + } | ||
1173 | + | ||
1174 | + /* A data transfer must not already be in progress */ | ||
1175 | + assert(r->req.aiocb == NULL); | ||
1176 | + | ||
1177 | + /* The request is used as the AIO opaque value, so add a ref. */ | ||
1178 | + scsi_req_ref(&r->req); | ||
1179 | + if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { | ||
1180 | + trace_ufs_scsi_read_data_invalid(); | ||
1181 | + scsi_read_complete_noio(r, -EINVAL); | ||
1182 | + return; | ||
1183 | + } | ||
1184 | + | ||
1185 | + if (!blk_is_available(req->dev->conf.blk)) { | ||
1186 | + scsi_read_complete_noio(r, -ENOMEDIUM); | ||
1187 | + return; | ||
1188 | + } | ||
1189 | + | ||
1190 | + first = !r->started; | ||
1191 | + r->started = true; | ||
1192 | + if (first && r->need_fua_emulation) { | ||
1193 | + block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, 0, | ||
1194 | + BLOCK_ACCT_FLUSH); | ||
1195 | + r->req.aiocb = blk_aio_flush(lu->qdev.conf.blk, scsi_do_read_cb, r); | ||
1196 | + } else { | ||
1197 | + scsi_do_read(r, 0); | ||
1198 | + } | ||
1199 | +} | ||
1200 | + | ||
1201 | +static void scsi_write_complete_noio(UfsSCSIReq *r, int ret) | ||
1202 | +{ | ||
1203 | + uint32_t n; | ||
1204 | + | ||
1205 | + assert(r->req.aiocb == NULL); | ||
1206 | + if (ufs_scsi_req_check_error(r, ret, false)) { | ||
1207 | + goto done; | ||
1208 | + } | ||
1209 | + | ||
1210 | + n = r->qiov.size / BDRV_SECTOR_SIZE; | ||
1211 | + r->sector += n; | ||
1212 | + r->sector_count -= n; | ||
1213 | + if (r->sector_count == 0) { | ||
1214 | + scsi_write_do_fua(r); | ||
1215 | + return; | ||
1216 | + } else { | ||
1217 | + scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); | ||
1218 | + trace_ufs_scsi_write_complete_noio(r->req.tag, r->qiov.size); | ||
1219 | + scsi_req_data(&r->req, r->qiov.size); | ||
1220 | + } | ||
1221 | + | ||
1222 | +done: | ||
1223 | + scsi_req_unref(&r->req); | ||
1224 | +} | ||
1225 | + | ||
1226 | +static void scsi_write_complete(void *opaque, int ret) | ||
1227 | +{ | ||
1228 | + UfsSCSIReq *r = (UfsSCSIReq *)opaque; | ||
1229 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1230 | + | ||
1231 | + assert(r->req.aiocb != NULL); | ||
1232 | + r->req.aiocb = NULL; | ||
1233 | + | ||
1234 | + aio_context_acquire(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1235 | + if (ret < 0) { | ||
1236 | + block_acct_failed(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1237 | + } else { | ||
1238 | + block_acct_done(blk_get_stats(lu->qdev.conf.blk), &r->acct); | ||
1239 | + } | ||
1240 | + scsi_write_complete_noio(r, ret); | ||
1241 | + aio_context_release(blk_get_aio_context(lu->qdev.conf.blk)); | ||
1242 | +} | ||
1243 | + | ||
1244 | +static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, | ||
1245 | + BlockCompletionFunc *cb, void *cb_opaque, | ||
1246 | + void *opaque) | ||
1247 | +{ | ||
1248 | + UfsSCSIReq *r = opaque; | ||
1249 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1250 | + return blk_aio_pwritev(lu->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); | ||
1251 | +} | ||
1252 | + | ||
1253 | +static void scsi_write_data(SCSIRequest *req) | ||
1254 | +{ | ||
1255 | + UfsSCSIReq *r = DO_UPCAST(UfsSCSIReq, req, req); | ||
1256 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, r->req.dev); | ||
1257 | + | ||
1258 | + /* A data transfer must not already be in progress */ | ||
1259 | + assert(r->req.aiocb == NULL); | ||
1260 | + | ||
1261 | + /* The request is used as the AIO opaque value, so add a ref. */ | ||
1262 | + scsi_req_ref(&r->req); | ||
1263 | + if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { | ||
1264 | + trace_ufs_scsi_write_data_invalid(); | ||
1265 | + scsi_write_complete_noio(r, -EINVAL); | ||
1266 | + return; | ||
1267 | + } | ||
1268 | + | ||
1269 | + if (!r->req.sg && !r->qiov.size) { | ||
1270 | + /* Called for the first time. Ask the driver to send us more data. */ | ||
1271 | + r->started = true; | ||
1272 | + scsi_write_complete_noio(r, 0); | ||
1273 | + return; | ||
1274 | + } | ||
1275 | + if (!blk_is_available(req->dev->conf.blk)) { | ||
1276 | + scsi_write_complete_noio(r, -ENOMEDIUM); | ||
1277 | + return; | ||
1278 | + } | ||
1279 | + | ||
1280 | + if (r->req.sg) { | ||
1281 | + dma_acct_start(lu->qdev.conf.blk, &r->acct, r->req.sg, | ||
1282 | + BLOCK_ACCT_WRITE); | ||
1283 | + r->req.residual -= r->req.sg->size; | ||
1284 | + r->req.aiocb = dma_blk_io( | ||
1285 | + blk_get_aio_context(lu->qdev.conf.blk), r->req.sg, | ||
1286 | + r->sector << BDRV_SECTOR_BITS, BDRV_SECTOR_SIZE, scsi_dma_writev, r, | ||
1287 | + scsi_dma_complete, r, DMA_DIRECTION_TO_DEVICE); | ||
1288 | + } else { | ||
1289 | + block_acct_start(blk_get_stats(lu->qdev.conf.blk), &r->acct, | ||
1290 | + r->qiov.size, BLOCK_ACCT_WRITE); | ||
1291 | + r->req.aiocb = scsi_dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, | ||
1292 | + scsi_write_complete, r, r); | ||
1293 | + } | ||
1294 | +} | ||
1295 | + | ||
1296 | +static const SCSIReqOps ufs_scsi_emulate_reqops = { | ||
1297 | + .size = sizeof(UfsSCSIReq), | ||
1298 | + .free_req = ufs_scsi_free_request, | ||
1299 | + .send_command = ufs_scsi_emulate_command, | ||
1300 | + .read_data = ufs_scsi_emulate_read_data, | ||
1301 | + .write_data = ufs_scsi_emulate_write_data, | ||
1302 | + .get_buf = ufs_scsi_get_buf, | ||
1303 | +}; | ||
1304 | + | ||
1305 | +static const SCSIReqOps ufs_scsi_dma_reqops = { | ||
1306 | + .size = sizeof(UfsSCSIReq), | ||
1307 | + .free_req = ufs_scsi_free_request, | ||
1308 | + .send_command = ufs_scsi_dma_command, | ||
1309 | + .read_data = scsi_read_data, | ||
1310 | + .write_data = scsi_write_data, | ||
1311 | + .get_buf = ufs_scsi_get_buf, | ||
1312 | +}; | ||
1313 | + | ||
1314 | +/* | ||
1315 | + * The following commands are not yet supported: | ||
1316 | + * PRE_FETCH(10), | ||
1317 | + * UNMAP, | ||
1318 | + * WRITE_BUFFER, READ_BUFFER, | ||
1319 | + * SECURITY_PROTOCOL_IN, SECURITY_PROTOCOL_OUT | ||
1320 | + */ | ||
1321 | +static const SCSIReqOps *const ufs_scsi_reqops_dispatch[256] = { | ||
1322 | + [TEST_UNIT_READY] = &ufs_scsi_emulate_reqops, | ||
1323 | + [INQUIRY] = &ufs_scsi_emulate_reqops, | ||
1324 | + [MODE_SENSE_10] = &ufs_scsi_emulate_reqops, | ||
1325 | + [START_STOP] = &ufs_scsi_emulate_reqops, | ||
1326 | + [READ_CAPACITY_10] = &ufs_scsi_emulate_reqops, | ||
1327 | + [REQUEST_SENSE] = &ufs_scsi_emulate_reqops, | ||
1328 | + [SYNCHRONIZE_CACHE] = &ufs_scsi_emulate_reqops, | ||
1329 | + [MODE_SELECT_10] = &ufs_scsi_emulate_reqops, | ||
1330 | + [VERIFY_10] = &ufs_scsi_emulate_reqops, | ||
1331 | + [FORMAT_UNIT] = &ufs_scsi_emulate_reqops, | ||
1332 | + [SERVICE_ACTION_IN_16] = &ufs_scsi_emulate_reqops, | ||
1333 | + [SEND_DIAGNOSTIC] = &ufs_scsi_emulate_reqops, | ||
1334 | + | ||
1335 | + [READ_6] = &ufs_scsi_dma_reqops, | ||
1336 | + [READ_10] = &ufs_scsi_dma_reqops, | ||
1337 | + [WRITE_6] = &ufs_scsi_dma_reqops, | ||
1338 | + [WRITE_10] = &ufs_scsi_dma_reqops, | ||
1339 | +}; | ||
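Opcodes with no entry in this table fall back to ufs_scsi_emulate_reqops in scsi_new_request() below, so an unsupported CDB is answered with INVALID OPCODE sense by the default case of ufs_scsi_emulate_command() rather than being silently dropped.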
1340 | + | ||
1341 | +static SCSIRequest *scsi_new_request(SCSIDevice *dev, uint32_t tag, | ||
1342 | + uint32_t lun, uint8_t *buf, | ||
1343 | + void *hba_private) | ||
1344 | +{ | ||
1345 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev); | ||
1346 | + SCSIRequest *req; | ||
1347 | + const SCSIReqOps *ops; | ||
1348 | + uint8_t command; | ||
1349 | + | ||
1350 | + command = buf[0]; | ||
1351 | + ops = ufs_scsi_reqops_dispatch[command]; | ||
1352 | + if (!ops) { | ||
1353 | + ops = &ufs_scsi_emulate_reqops; | ||
1354 | + } | ||
1355 | + req = scsi_req_alloc(ops, &lu->qdev, tag, lun, hba_private); | ||
1356 | + | ||
1357 | + return req; | ||
1358 | +} | ||
1359 | + | ||
1360 | +static Property ufs_lu_props[] = { | ||
1361 | + DEFINE_PROP_DRIVE("drive", UfsLu, qdev.conf.blk), | ||
1362 | + DEFINE_PROP_END_OF_LIST(), | ||
1363 | +}; | ||
1364 | + | ||
1365 | +static bool ufs_lu_bdrv_init(UfsLu *lu, Error **errp) | ||
1366 | +{ | ||
1367 | + SCSIDevice *dev = &lu->qdev; | ||
1368 | + bool read_only; | ||
1369 | + | ||
1370 | + if (!lu->qdev.conf.blk) { | ||
1371 | + error_setg(errp, "drive property not set"); | ||
1372 | + return false; | ||
1373 | + } | ||
1374 | + | ||
1375 | + if (!blkconf_blocksizes(&lu->qdev.conf, errp)) { | ||
1376 | + return false; | ||
1377 | + } | ||
1378 | + | ||
1379 | + if (blk_get_aio_context(lu->qdev.conf.blk) != qemu_get_aio_context() && | ||
1380 | + !lu->qdev.hba_supports_iothread) { | ||
1381 | + error_setg(errp, "HBA does not support iothreads"); | ||
1382 | + return false; | ||
1383 | + } | ||
1384 | + | ||
1385 | + read_only = !blk_supports_write_perm(lu->qdev.conf.blk); | ||
1386 | + | ||
1387 | + if (!blkconf_apply_backend_options(&dev->conf, read_only, | ||
1388 | + dev->type == TYPE_DISK, errp)) { | ||
1389 | + return false; | ||
1390 | + } | ||
1391 | + | ||
1392 | + if (blk_is_sg(lu->qdev.conf.blk)) { | ||
1393 | + error_setg(errp, "unwanted /dev/sg*"); | ||
1394 | + return false; | ||
1395 | + } | ||
1396 | + | ||
1397 | + blk_iostatus_enable(lu->qdev.conf.blk); | ||
1398 | + return true; | ||
1399 | +} | ||
1400 | + | ||
1401 | +static bool ufs_add_lu(UfsHc *u, UfsLu *lu, Error **errp) | ||
1402 | +{ | ||
1403 | + BlockBackend *blk = lu->qdev.conf.blk; | ||
1404 | + int64_t bdrv_len = blk_getlength(blk); | ||
1405 | + uint64_t raw_dev_cap = | ||
1406 | + be64_to_cpu(u->geometry_desc.total_raw_device_capacity); | ||
1407 | + | ||
1408 | + if (u->device_desc.number_lu >= UFS_MAX_LUS) { | ||
1409 | + error_setg(errp, "ufs host controller has too many logical units."); | ||
1410 | + return false; | ||
1411 | + } | ||
1412 | + | ||
1413 | + if (u->lus[lu->lun] != NULL) { | ||
1414 | + error_setg(errp, "ufs logical unit %d already exists.", lu->lun); | ||
1415 | + return false; | ||
1416 | + } | ||
1417 | + | ||
1418 | + u->lus[lu->lun] = lu; | ||
1419 | + u->device_desc.number_lu++; | ||
1420 | + raw_dev_cap += (bdrv_len >> UFS_GEOMETRY_CAPACITY_SHIFT); | ||
1421 | + u->geometry_desc.total_raw_device_capacity = cpu_to_be64(raw_dev_cap); | ||
1422 | + return true; | ||
1423 | +} | ||
1424 | + | ||
1425 | +static inline uint8_t ufs_log2(uint64_t input) | ||
1426 | +{ | ||
1427 | + int log = 0; | ||
1428 | + while (input >>= 1) { | ||
1429 | + log++; | ||
1430 | + } | ||
1431 | + return log; | ||
1432 | +} | ||
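ufs_log2() computes the floor of log2, which is exact for the power-of-two block sizes used here. A standalone sketch of the same loop (renamed to avoid clashing with the function above):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t flog2(uint64_t input)
    {
        uint8_t log = 0;
        while (input >>= 1) {
            log++;
        }
        return log;
    }

    int main(void)
    {
        assert(flog2(4096) == 12); /* 4 KiB -> logical_block_size = 12 */
        assert(flog2(512) == 9);
        assert(flog2(4097) == 12); /* non-powers of two round down */
        return 0;
    }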
1433 | + | ||
1434 | +static void ufs_init_lu(UfsLu *lu) | ||
1435 | +{ | ||
1436 | + BlockBackend *blk = lu->qdev.conf.blk; | ||
1437 | + int64_t bdrv_len = blk_getlength(blk); | ||
1438 | + | ||
1439 | + lu->lun = lu->qdev.lun; | ||
1440 | + memset(&lu->unit_desc, 0, sizeof(lu->unit_desc)); | ||
1441 | + lu->unit_desc.length = sizeof(UnitDescriptor); | ||
1442 | + lu->unit_desc.descriptor_idn = UFS_QUERY_DESC_IDN_UNIT; | ||
1443 | + lu->unit_desc.lu_enable = 0x01; | ||
1444 | + lu->unit_desc.logical_block_size = ufs_log2(lu->qdev.blocksize); | ||
1445 | + lu->unit_desc.unit_index = lu->qdev.lun; | ||
1446 | + lu->unit_desc.logical_block_count = | ||
1447 | + cpu_to_be64(bdrv_len / (1 << lu->unit_desc.logical_block_size)); | ||
1448 | +} | ||
1449 | + | ||
1450 | +static bool ufs_lu_check_constraints(UfsLu *lu, Error **errp) | ||
1451 | +{ | ||
1452 | + if (!lu->qdev.conf.blk) { | ||
1453 | + error_setg(errp, "drive property not set"); | ||
1454 | + return false; | ||
1455 | + } | ||
1456 | + | ||
1457 | + if (lu->qdev.channel != 0) { | ||
1458 | + error_setg(errp, "ufs logical unit does not support channel"); | ||
1459 | + return false; | ||
1460 | + } | ||
1461 | + | ||
1462 | + if (lu->qdev.lun >= UFS_MAX_LUS) { | ||
1463 | + error_setg(errp, "lun must be between 1 and %d", UFS_MAX_LUS - 1); | ||
1464 | + return false; | ||
1465 | + } | ||
1466 | + | ||
1467 | + return true; | ||
1468 | +} | ||
1469 | + | ||
1470 | +static void ufs_lu_realize(SCSIDevice *dev, Error **errp) | ||
1471 | +{ | ||
1472 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev); | ||
1473 | + BusState *s = qdev_get_parent_bus(&dev->qdev); | ||
1474 | + UfsHc *u = UFS(s->parent); | ||
1475 | + AioContext *ctx = NULL; | ||
1476 | + uint64_t nb_sectors, nb_blocks; | ||
1477 | + | ||
1478 | + if (!ufs_lu_check_constraints(lu, errp)) { | ||
1479 | + return; | ||
1480 | + } | ||
1481 | + | ||
1482 | + if (lu->qdev.conf.blk) { | ||
1483 | + ctx = blk_get_aio_context(lu->qdev.conf.blk); | ||
1484 | + aio_context_acquire(ctx); | ||
1485 | + if (!blkconf_blocksizes(&lu->qdev.conf, errp)) { | ||
1486 | + goto out; | ||
1487 | + } | ||
1488 | + } | ||
1489 | + lu->qdev.blocksize = UFS_BLOCK_SIZE; | ||
1490 | + blk_get_geometry(lu->qdev.conf.blk, &nb_sectors); | ||
1491 | + nb_blocks = nb_sectors / (lu->qdev.blocksize / BDRV_SECTOR_SIZE); | ||
1492 | + if (nb_blocks > UINT32_MAX) { | ||
1493 | + nb_blocks = UINT32_MAX; | ||
1494 | + } | ||
1495 | + lu->qdev.max_lba = nb_blocks; | ||
1496 | + lu->qdev.type = TYPE_DISK; | ||
1497 | + | ||
1498 | + ufs_init_lu(lu); | ||
1499 | + if (!ufs_add_lu(u, lu, errp)) { | ||
1500 | + goto out; | ||
1501 | + } | ||
1502 | + | ||
1503 | + ufs_lu_bdrv_init(lu, errp); | ||
1504 | +out: | ||
1505 | + if (ctx) { | ||
1506 | + aio_context_release(ctx); | ||
1507 | + } | ||
1508 | +} | ||
1509 | + | ||
1510 | +static void ufs_lu_unrealize(SCSIDevice *dev) | ||
1511 | +{ | ||
1512 | + UfsLu *lu = DO_UPCAST(UfsLu, qdev, dev); | ||
1513 | + | ||
1514 | + blk_drain(lu->qdev.conf.blk); | ||
1515 | +} | ||
1516 | + | ||
1517 | +static void ufs_wlu_realize(DeviceState *qdev, Error **errp) | ||
1518 | +{ | ||
1519 | + UfsWLu *wlu = UFSWLU(qdev); | ||
1520 | + SCSIDevice *dev = &wlu->qdev; | ||
1521 | + | ||
1522 | + if (!is_wlun(dev->lun)) { | ||
1523 | + error_setg(errp, "not well-known logical unit number"); | ||
1524 | + return; | ||
1525 | + } | ||
1526 | + | ||
1527 | + QTAILQ_INIT(&dev->requests); | ||
1528 | +} | ||
1529 | + | ||
1530 | +static void ufs_lu_class_init(ObjectClass *oc, void *data) | ||
1531 | +{ | ||
1532 | + DeviceClass *dc = DEVICE_CLASS(oc); | ||
1533 | + SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc); | ||
1534 | + | ||
1535 | + sc->realize = ufs_lu_realize; | ||
1536 | + sc->unrealize = ufs_lu_unrealize; | ||
1537 | + sc->alloc_req = scsi_new_request; | ||
1538 | + dc->bus_type = TYPE_UFS_BUS; | ||
1539 | + device_class_set_props(dc, ufs_lu_props); | ||
1540 | + dc->desc = "Virtual UFS logical unit"; | ||
1541 | +} | ||
1542 | + | ||
1543 | +static void ufs_wlu_class_init(ObjectClass *oc, void *data) | ||
1544 | +{ | ||
1545 | + DeviceClass *dc = DEVICE_CLASS(oc); | ||
1546 | + SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(oc); | ||
1547 | + | ||
1548 | + /* | ||
1549 | + * The realize() function of TYPE_SCSI_DEVICE causes a segmentation fault | ||
1550 | + * if a block drive does not exist. Define a new realize function for | ||
1551 | + * well-known LUs that do not have a block drive. | ||
1552 | + */ | ||
1553 | + dc->realize = ufs_wlu_realize; | ||
1554 | + sc->alloc_req = scsi_new_request; | ||
1555 | + dc->bus_type = TYPE_UFS_BUS; | ||
1556 | + dc->desc = "Virtual UFS well-known logical unit"; | ||
1557 | +} | ||
1558 | + | ||
1559 | +static const TypeInfo ufs_lu_info = { | ||
1560 | + .name = TYPE_UFS_LU, | ||
1561 | + .parent = TYPE_SCSI_DEVICE, | ||
1562 | + .class_init = ufs_lu_class_init, | ||
1563 | + .instance_size = sizeof(UfsLu), | ||
1564 | +}; | ||
1565 | + | ||
1566 | +static const TypeInfo ufs_wlu_info = { | ||
1567 | + .name = TYPE_UFS_WLU, | ||
1568 | + .parent = TYPE_SCSI_DEVICE, | ||
1569 | + .class_init = ufs_wlu_class_init, | ||
1570 | + .instance_size = sizeof(UfsWLu), | ||
1571 | +}; | ||
1572 | + | ||
1573 | +static void ufs_lu_register_types(void) | ||
1574 | +{ | ||
1575 | + type_register_static(&ufs_lu_info); | ||
1576 | + type_register_static(&ufs_wlu_info); | ||
1577 | +} | ||
1578 | + | ||
1579 | +type_init(ufs_lu_register_types) | ||
1580 | diff --git a/hw/ufs/ufs.c b/hw/ufs/ufs.c | ||
1581 | index XXXXXXX..XXXXXXX 100644 | ||
1582 | --- a/hw/ufs/ufs.c | ||
1583 | +++ b/hw/ufs/ufs.c | ||
1584 | @@ -XXX,XX +XXX,XX @@ | ||
1585 | * SPDX-License-Identifier: GPL-2.0-or-later | ||
1586 | */ | ||
1587 | |||
1588 | +/** | ||
1589 | + * Reference Specs: https://www.jedec.org/, 3.1 | ||
1590 | + * | ||
1591 | + * Usage | ||
1592 | + * ----- | ||
1593 | + * | ||
1594 | + * Add options: | ||
1595 | + * -drive file=<file>,if=none,id=<drive_id> | ||
1596 | + * -device ufs,serial=<serial>,id=<bus_name>, \ | ||
1597 | + * nutrs=<N[optional]>,nutmrs=<N[optional]> | ||
1598 | + * -device ufs-lu,drive=<drive_id>,bus=<bus_name> | ||
1599 | + */ | ||
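A concrete instantiation of the usage template above might look like the following (file name, serial, and IDs are illustrative only):

    -drive file=ufs-disk.img,if=none,id=drv0
    -device ufs,serial=0123456789,id=ufs0
    -device ufs-lu,drive=drv0,bus=ufs0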
1600 | + | ||
1601 | #include "qemu/osdep.h" | ||
1602 | #include "qapi/error.h" | ||
1603 | #include "migration/vmstate.h" | ||
1604 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps ufs_mmio_ops = { | ||
1605 | }, | ||
1606 | }; | ||
1607 | |||
1608 | +static QEMUSGList *ufs_get_sg_list(SCSIRequest *scsi_req) | ||
1609 | +{ | ||
1610 | + UfsRequest *req = scsi_req->hba_private; | ||
1611 | + return req->sg; | ||
1612 | +} | ||
1613 | + | ||
1614 | +static void ufs_build_upiu_sense_data(UfsRequest *req, SCSIRequest *scsi_req) | ||
1615 | +{ | ||
1616 | + req->rsp_upiu.sr.sense_data_len = cpu_to_be16(scsi_req->sense_len); | ||
1617 | + assert(scsi_req->sense_len <= SCSI_SENSE_LEN); | ||
1618 | + memcpy(req->rsp_upiu.sr.sense_data, scsi_req->sense, scsi_req->sense_len); | ||
1619 | +} | ||
1620 | + | ||
1621 | static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, | ||
1622 | uint8_t flags, uint8_t response, | ||
1623 | uint8_t scsi_status, | ||
1624 | @@ -XXX,XX +XXX,XX @@ static void ufs_build_upiu_header(UfsRequest *req, uint8_t trans_type, | ||
1625 | req->rsp_upiu.header.data_segment_length = cpu_to_be16(data_segment_length); | ||
1626 | } | ||
1627 | |||
1628 | +static void ufs_scsi_command_complete(SCSIRequest *scsi_req, size_t resid) | ||
1629 | +{ | ||
1630 | + UfsRequest *req = scsi_req->hba_private; | ||
1631 | + int16_t status = scsi_req->status; | ||
1632 | + uint32_t expected_len = be32_to_cpu(req->req_upiu.sc.exp_data_transfer_len); | ||
1633 | + uint32_t transferred_len = scsi_req->cmd.xfer - resid; | ||
1634 | + uint8_t flags = 0, response = UFS_COMMAND_RESULT_SUCESS; | ||
1635 | + uint16_t data_segment_length; | ||
1636 | + | ||
1637 | + if (expected_len > transferred_len) { | ||
1638 | + req->rsp_upiu.sr.residual_transfer_count = | ||
1639 | + cpu_to_be32(expected_len - transferred_len); | ||
1640 | + flags |= UFS_UPIU_FLAG_UNDERFLOW; | ||
1641 | + } else if (expected_len < transferred_len) { | ||
1642 | + req->rsp_upiu.sr.residual_transfer_count = | ||
1643 | + cpu_to_be32(transferred_len - expected_len); | ||
1644 | + flags |= UFS_UPIU_FLAG_OVERFLOW; | ||
1645 | + } | ||
1646 | + | ||
1647 | + if (status != 0) { | ||
1648 | + ufs_build_upiu_sense_data(req, scsi_req); | ||
1649 | + response = UFS_COMMAND_RESULT_FAIL; | ||
1650 | + } | ||
1651 | + | ||
1652 | + data_segment_length = scsi_req->sense_len + | ||
1653 | + sizeof(req->rsp_upiu.sr.sense_data_len); | ||
1654 | + ufs_build_upiu_header(req, UFS_UPIU_TRANSACTION_RESPONSE, flags, response, | ||
1655 | + status, data_segment_length); | ||
1656 | + | ||
1657 | + ufs_complete_req(req, UFS_REQUEST_SUCCESS); | ||
1658 | + | ||
1659 | + scsi_req->hba_private = NULL; | ||
1660 | + scsi_req_unref(scsi_req); | ||
1661 | +} | ||
1662 | + | ||
1663 | +static const struct SCSIBusInfo ufs_scsi_info = { | ||
1664 | + .tcq = true, | ||
1665 | + .max_target = 0, | ||
1666 | + .max_lun = UFS_MAX_LUS, | ||
1667 | + .max_channel = 0, | ||
1668 | + | ||
1669 | + .get_sg_list = ufs_get_sg_list, | ||
1670 | + .complete = ufs_scsi_command_complete, | ||
1671 | +}; | ||
1672 | + | ||
1673 | +static UfsReqResult ufs_exec_scsi_cmd(UfsRequest *req) | ||
1674 | +{ | ||
1675 | + UfsHc *u = req->hc; | ||
1676 | + uint8_t lun = req->req_upiu.header.lun; | ||
1677 | + uint8_t task_tag = req->req_upiu.header.task_tag; | ||
1678 | + SCSIDevice *dev = NULL; | ||
1679 | + | ||
1680 | + trace_ufs_exec_scsi_cmd(req->slot, lun, req->req_upiu.sc.cdb[0]); | ||
1681 | + | ||
1682 | + if (!is_wlun(lun)) { | ||
1683 | + if (lun >= u->device_desc.number_lu) { | ||
1684 | + trace_ufs_err_scsi_cmd_invalid_lun(lun); | ||
1685 | + return UFS_REQUEST_FAIL; | ||
1686 | + } else if (u->lus[lun] == NULL) { | ||
1687 | + trace_ufs_err_scsi_cmd_invalid_lun(lun); | ||
1688 | + return UFS_REQUEST_FAIL; | ||
1689 | + } | ||
1690 | + } | ||
1691 | + | ||
1692 | + switch (lun) { | ||
1693 | + case UFS_UPIU_REPORT_LUNS_WLUN: | ||
1694 | + dev = &u->report_wlu->qdev; | ||
1695 | + break; | ||
1696 | + case UFS_UPIU_UFS_DEVICE_WLUN: | ||
1697 | + dev = &u->dev_wlu->qdev; | ||
1698 | + break; | ||
1699 | + case UFS_UPIU_BOOT_WLUN: | ||
1700 | + dev = &u->boot_wlu->qdev; | ||
1701 | + break; | ||
1702 | + case UFS_UPIU_RPMB_WLUN: | ||
1703 | + dev = &u->rpmb_wlu->qdev; | ||
1704 | + break; | ||
1705 | + default: | ||
1706 | + dev = &u->lus[lun]->qdev; | ||
1707 | + } | ||
1708 | + | ||
1709 | + SCSIRequest *scsi_req = scsi_req_new( | ||
1710 | + dev, task_tag, lun, req->req_upiu.sc.cdb, UFS_CDB_SIZE, req); | ||
1711 | + | ||
1712 | + uint32_t len = scsi_req_enqueue(scsi_req); | ||
1713 | + if (len) { | ||
1714 | + scsi_req_continue(scsi_req); | ||
1715 | + } | ||
1716 | + | ||
1717 | + return UFS_REQUEST_NO_COMPLETE; | ||
1718 | +} | ||
1719 | + | ||
1720 | static UfsReqResult ufs_exec_nop_cmd(UfsRequest *req) | ||
1721 | { | ||
1722 | trace_ufs_exec_nop_cmd(req->slot); | ||
1723 | @@ -XXX,XX +XXX,XX @@ static const RpmbUnitDescriptor rpmb_unit_desc = { | ||
1724 | |||
1725 | static QueryRespCode ufs_read_unit_desc(UfsRequest *req) | ||
1726 | { | ||
1727 | + UfsHc *u = req->hc; | ||
1728 | uint8_t lun = req->req_upiu.qr.index; | ||
1729 | |||
1730 | - if (lun != UFS_UPIU_RPMB_WLUN && lun > UFS_MAX_LUS) { | ||
1731 | + if (lun != UFS_UPIU_RPMB_WLUN && | ||
1732 | + (lun >= UFS_MAX_LUS || u->lus[lun] == NULL)) { | ||
1733 | trace_ufs_err_query_invalid_index(req->req_upiu.qr.opcode, lun); | ||
1734 | return UFS_QUERY_RESULT_INVALID_INDEX; | ||
1735 | } | ||
1736 | @@ -XXX,XX +XXX,XX @@ static QueryRespCode ufs_read_unit_desc(UfsRequest *req) | ||
1737 | if (lun == UFS_UPIU_RPMB_WLUN) { | ||
1738 | memcpy(&req->rsp_upiu.qr.data, &rpmb_unit_desc, rpmb_unit_desc.length); | ||
1739 | } else { | ||
1740 | - /* unit descriptor is not yet supported */ | ||
1741 | - return UFS_QUERY_RESULT_INVALID_INDEX; | ||
1742 | + memcpy(&req->rsp_upiu.qr.data, &u->lus[lun]->unit_desc, | ||
1743 | + sizeof(u->lus[lun]->unit_desc)); | ||
1744 | } | ||
1745 | |||
1746 | return UFS_QUERY_RESULT_SUCCESS; | ||
1747 | @@ -XXX,XX +XXX,XX @@ static void ufs_exec_req(UfsRequest *req) | ||
1748 | req_result = ufs_exec_nop_cmd(req); | ||
1749 | break; | ||
1750 | case UFS_UPIU_TRANSACTION_COMMAND: | ||
1751 | - /* Not yet implemented */ | ||
1752 | - req_result = UFS_REQUEST_FAIL; | ||
1753 | + req_result = ufs_exec_scsi_cmd(req); | ||
1754 | break; | ||
1755 | case UFS_UPIU_TRANSACTION_QUERY_REQ: | ||
1756 | req_result = ufs_exec_query_cmd(req); | ||
1757 | @@ -XXX,XX +XXX,XX @@ static void ufs_exec_req(UfsRequest *req) | ||
1758 | req_result = UFS_REQUEST_FAIL; | ||
1759 | } | ||
1760 | |||
1761 | - ufs_complete_req(req, req_result); | ||
1762 | + /* | ||
1763 | + * The ufs_complete_req for scsi commands is handled by the | ||
1764 | + * ufs_scsi_command_complete() callback function. Therefore, to avoid | ||
1765 | + * duplicate processing, ufs_complete_req() is not called for scsi commands. | ||
1766 | + */ | ||
1767 | + if (req_result != UFS_REQUEST_NO_COMPLETE) { | ||
1768 | + ufs_complete_req(req, req_result); | ||
1769 | + } | 183 | + } |
1770 | } | 184 | } |
1771 | 185 | ||
1772 | static void ufs_process_req(void *opaque) | 186 | /** |
1773 | @@ -XXX,XX +XXX,XX @@ static void ufs_init_hc(UfsHc *u) | 187 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_rdlock(CoRwlock *lock) |
1774 | u->flags.permanently_disable_fw_update = 1; | 188 | Coroutine *self = qemu_coroutine_self(); |
1775 | } | 189 | |
1776 | 190 | while (lock->writer) { | |
1777 | +static bool ufs_init_wlu(UfsHc *u, UfsWLu **wlu, uint8_t wlun, Error **errp) | 191 | - qemu_co_queue_wait(&lock->queue); |
1778 | +{ | 192 | + qemu_co_queue_wait(&lock->queue, NULL); |
1779 | + UfsWLu *new_wlu = UFSWLU(qdev_new(TYPE_UFS_WLU)); | 193 | } |
1780 | + | 194 | lock->reader++; |
1781 | + qdev_prop_set_uint32(DEVICE(new_wlu), "lun", wlun); | 195 | self->locks_held++; |
1782 | + | 196 | @@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_wrlock(CoRwlock *lock) |
1783 | + /* | 197 | Coroutine *self = qemu_coroutine_self(); |
1784 | + * Well-known LUs share the same bus as the normal LUs. If a well-known | 198 | |
1785 | + * LU used the same channel value as the normal LUs, REPORT LUNS would | 199 | while (lock->writer || lock->reader) { |
1786 | + * report the well-known LUs alongside the normal LUs. To prevent this, | 200 | - qemu_co_queue_wait(&lock->queue); |
1787 | + * the channel value of normal LUs is fixed to 0 and the channel value | 201 | + qemu_co_queue_wait(&lock->queue, NULL); |
1788 | + * of well-known LUs is fixed to 1. | 202 | } |
1789 | + */ | 203 | lock->writer = true; |
1790 | + qdev_prop_set_uint32(DEVICE(new_wlu), "channel", 1); | 204 | self->locks_held++; |
1791 | + if (!qdev_realize_and_unref(DEVICE(new_wlu), BUS(&u->bus), errp)) { | ||
1792 | + return false; | ||
1793 | + } | ||
1794 | + | ||
1795 | + *wlu = new_wlu; | ||
1796 | + return true; | ||
1797 | +} | ||
1798 | + | ||
 static void ufs_realize(PCIDevice *pci_dev, Error **errp)
 {
     UfsHc *u = UFS(pci_dev);
@@ -XXX,XX +XXX,XX @@ static void ufs_realize(PCIDevice *pci_dev, Error **errp)
         return;
     }

+    qbus_init(&u->bus, sizeof(UfsBus), TYPE_UFS_BUS, &pci_dev->qdev,
+              u->parent_obj.qdev.id);
+    u->bus.parent_bus.info = &ufs_scsi_info;
+
     ufs_init_state(u);
     ufs_init_hc(u);
     ufs_init_pci(u, pci_dev);
+
+    if (!ufs_init_wlu(u, &u->report_wlu, UFS_UPIU_REPORT_LUNS_WLUN, errp)) {
+        return;
+    }
+
+    if (!ufs_init_wlu(u, &u->dev_wlu, UFS_UPIU_UFS_DEVICE_WLUN, errp)) {
+        return;
+    }
+
+    if (!ufs_init_wlu(u, &u->boot_wlu, UFS_UPIU_BOOT_WLUN, errp)) {
+        return;
+    }
+
+    if (!ufs_init_wlu(u, &u->rpmb_wlu, UFS_UPIU_RPMB_WLUN, errp)) {
+        return;
+    }
 }

 static void ufs_exit(PCIDevice *pci_dev)
 {
     UfsHc *u = UFS(pci_dev);

+    if (u->dev_wlu) {
+        object_unref(OBJECT(u->dev_wlu));
+        u->dev_wlu = NULL;
+    }
+
+    if (u->report_wlu) {
+        object_unref(OBJECT(u->report_wlu));
+        u->report_wlu = NULL;
+    }
+
+    if (u->rpmb_wlu) {
+        object_unref(OBJECT(u->rpmb_wlu));
+        u->rpmb_wlu = NULL;
+    }
+
+    if (u->boot_wlu) {
+        object_unref(OBJECT(u->boot_wlu));
+        u->boot_wlu = NULL;
+    }
+
     qemu_bh_delete(u->doorbell_bh);
     qemu_bh_delete(u->complete_bh);

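With the controller creating its bus and the four well-known LUs during
realize above, a backing logical unit is attached from the command line. A
hypothetical invocation (the ufs and ufs-lu device names come from this
series; the exact drive/lun property spelling here is an assumption):

    qemu-system-x86_64 \
        -drive file=ufs.img,if=none,id=drv0,format=raw \
        -device ufs,id=ufs0,serial=deadbeef \
        -device ufs-lu,drive=drv0,lun=0
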
@@ -XXX,XX +XXX,XX @@ static void ufs_class_init(ObjectClass *oc, void *data)
     dc->vmsd = &ufs_vmstate;
 }

+static bool ufs_bus_check_address(BusState *qbus, DeviceState *qdev,
+                                  Error **errp)
+{
+    SCSIDevice *dev = SCSI_DEVICE(qdev);
+    UfsBusClass *ubc = UFS_BUS_GET_CLASS(qbus);
+    UfsHc *u = UFS(qbus->parent);
+
+    if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_WLU) == 0) {
+        if (dev->lun != UFS_UPIU_REPORT_LUNS_WLUN &&
+            dev->lun != UFS_UPIU_UFS_DEVICE_WLUN &&
+            dev->lun != UFS_UPIU_BOOT_WLUN && dev->lun != UFS_UPIU_RPMB_WLUN) {
+            error_setg(errp, "bad well-known lun: %d", dev->lun);
+            return false;
+        }
+
+        if ((dev->lun == UFS_UPIU_REPORT_LUNS_WLUN && u->report_wlu != NULL) ||
+            (dev->lun == UFS_UPIU_UFS_DEVICE_WLUN && u->dev_wlu != NULL) ||
+            (dev->lun == UFS_UPIU_BOOT_WLUN && u->boot_wlu != NULL) ||
+            (dev->lun == UFS_UPIU_RPMB_WLUN && u->rpmb_wlu != NULL)) {
+            error_setg(errp, "well-known lun %d already exists", dev->lun);
+            return false;
+        }
+
+        return true;
+    }
+
+    if (strcmp(object_get_typename(OBJECT(dev)), TYPE_UFS_LU) != 0) {
+        error_setg(errp, "%s cannot be connected to ufs-bus",
+                   object_get_typename(OBJECT(dev)));
+        return false;
+    }
+
+    return ubc->parent_check_address(qbus, qdev, errp);
+}
+
+static void ufs_bus_class_init(ObjectClass *class, void *data)
+{
+    BusClass *bc = BUS_CLASS(class);
+    UfsBusClass *ubc = UFS_BUS_CLASS(class);
+    ubc->parent_check_address = bc->check_address;
+    bc->check_address = ufs_bus_check_address;
+}
+
 static const TypeInfo ufs_info = {
     .name = TYPE_UFS,
     .parent = TYPE_PCI_DEVICE,
@@ -XXX,XX +XXX,XX @@ static const TypeInfo ufs_info = {
     .interfaces = (InterfaceInfo[]){ { INTERFACE_PCIE_DEVICE }, {} },
 };

+static const TypeInfo ufs_bus_info = {
+    .name = TYPE_UFS_BUS,
+    .parent = TYPE_SCSI_BUS,
+    .class_init = ufs_bus_class_init,
+    .class_size = sizeof(UfsBusClass),
+    .instance_size = sizeof(UfsBus),
+};
+
 static void ufs_register_types(void)
 {
     type_register_static(&ufs_info);
+    type_register_static(&ufs_bus_info);
 }

 type_init(ufs_register_types)
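ufs_bus_class_init() above follows the common QOM idiom of saving the parent
class's implementation before overriding it, so ufs_bus_check_address() can
fall back to the generic SCSI bus checks. Reduced to its essentials (types
simplified for illustration; this is not the QOM API itself):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct BaseClass {
        bool (*check_address)(void *bus, void *dev);
    } BaseClass;

    typedef struct DerivedClass {
        BaseClass parent_class;
        bool (*parent_check_address)(void *bus, void *dev); /* saved slot */
    } DerivedClass;

    static DerivedClass the_class;  /* stands in for the QOM class object */

    static bool base_check(void *bus, void *dev)
    {
        puts("generic checks");
        return true;
    }

    static bool derived_check(void *bus, void *dev)
    {
        puts("type-specific checks");                    /* our checks first */
        return the_class.parent_check_address(bus, dev); /* then chain up */
    }

    static void derived_class_init(DerivedClass *dc)
    {
        BaseClass *bc = &dc->parent_class;
        dc->parent_check_address = bc->check_address; /* save parent's */
        bc->check_address = derived_check;            /* install override */
    }

    int main(void)
    {
        the_class.parent_class.check_address = base_check;
        derived_class_init(&the_class);
        return the_class.parent_class.check_address(NULL, NULL) ? 0 : 1;
    }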
diff --git a/hw/ufs/meson.build b/hw/ufs/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/ufs/meson.build
+++ b/hw/ufs/meson.build
@@ -1 +1 @@
-system_ss.add(when: 'CONFIG_UFS_PCI', if_true: files('ufs.c'))
+system_ss.add(when: 'CONFIG_UFS_PCI', if_true: files('ufs.c', 'lu.c'))
diff --git a/hw/ufs/trace-events b/hw/ufs/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/ufs/trace-events
+++ b/hw/ufs/trace-events
@@ -XXX,XX +XXX,XX @@ ufs_exec_scsi_cmd(uint32_t slot, uint8_t lun, uint8_t opcode) "slot %"PRIu32", l
 ufs_exec_query_cmd(uint32_t slot, uint8_t opcode) "slot %"PRIu32", opcode 0x%"PRIx8""
 ufs_process_uiccmd(uint32_t uiccmd, uint32_t ucmdarg1, uint32_t ucmdarg2, uint32_t ucmdarg3) "uiccmd 0x%"PRIx32", ucmdarg1 0x%"PRIx32", ucmdarg2 0x%"PRIx32", ucmdarg3 0x%"PRIx32""

+# lu.c
+ufs_scsi_check_condition(uint32_t tag, uint8_t key, uint8_t asc, uint8_t ascq) "Command complete tag=0x%x sense=%d/%d/%d"
+ufs_scsi_read_complete(uint32_t tag, size_t size) "Data ready tag=0x%x len=%zd"
+ufs_scsi_read_data_count(uint32_t sector_count) "Read sector_count=%d"
+ufs_scsi_read_data_invalid(void) "Data transfer direction invalid"
+ufs_scsi_write_complete_noio(uint32_t tag, size_t size) "Write complete tag=0x%x more=%zd"
+ufs_scsi_write_data_invalid(void) "Data transfer direction invalid"
+ufs_scsi_emulate_vpd_page_00(size_t xfer) "Inquiry EVPD[Supported pages] buffer size %zd"
+ufs_scsi_emulate_vpd_page_80_not_supported(void) "Inquiry EVPD[Serial number] not supported"
+ufs_scsi_emulate_vpd_page_80(size_t xfer) "Inquiry EVPD[Serial number] buffer size %zd"
+ufs_scsi_emulate_vpd_page_87(size_t xfer) "Inquiry EVPD[Mode Page Policy] buffer size %zd"
+ufs_scsi_emulate_mode_sense(int cmd, int page, size_t xfer, int control) "Mode Sense(%d) (page %d, xfer %zd, page_control %d)"
+ufs_scsi_emulate_read_data(int buflen) "Read buf_len=%d"
+ufs_scsi_emulate_write_data(int buflen) "Write buf_len=%d"
+ufs_scsi_emulate_command_START_STOP(void) "START STOP UNIT"
+ufs_scsi_emulate_command_FORMAT_UNIT(void) "FORMAT UNIT"
+ufs_scsi_emulate_command_SEND_DIAGNOSTIC(void) "SEND DIAGNOSTIC"
+ufs_scsi_emulate_command_SAI_16(void) "SAI READ CAPACITY(16)"
+ufs_scsi_emulate_command_SAI_unsupported(void) "Unsupported Service Action In"
+ufs_scsi_emulate_command_MODE_SELECT_10(size_t xfer) "Mode Select(10) (len %zd)"
+ufs_scsi_emulate_command_VERIFY(int bytchk) "Verify (bytchk %d)"
+ufs_scsi_emulate_command_UNKNOWN(int cmd, const char *name) "Unknown SCSI command (0x%2.2x=%s)"
+ufs_scsi_dma_command_READ(uint64_t lba, uint32_t len) "Read (block %" PRIu64 ", count %u)"
+ufs_scsi_dma_command_WRITE(uint64_t lba, uint32_t len) "Write (block %" PRIu64 ", count %u)"
+
 # error condition
 ufs_err_dma_read_utrd(uint32_t slot, uint64_t addr) "failed to read utrd. UTRLDBR slot %"PRIu32", UTRD dma addr %"PRIu64""
 ufs_err_dma_read_req_upiu(uint32_t slot, uint64_t addr) "failed to read req upiu. UTRLDBR slot %"PRIu32", request upiu addr %"PRIu64""
-- 
2.41.0
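The trace points added in hw/ufs/trace-events above can be enabled at runtime
with QEMU's -trace option, for example:

    qemu-system-x86_64 -trace "ufs_scsi_*" ...

Assuming a build with the default "log" trace backend, matching events are
written to stderr; a single event can also be toggled from the monitor with
"trace-event ufs_scsi_read_complete on".
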
From: Paolo Bonzini <pbonzini@redhat.com>

This adds a CoMutex around the existing CoQueue. Because the write side
can simply take the CoMutex, the old "writer" field is no longer
necessary. Instead of removing it altogether, count the number of
pending writers during a read-side critical section and forbid further
readers from entering.
9 | Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | ||
10 | Reviewed-by: Fam Zheng <famz@redhat.com> | ||
11 | Message-id: 20170213181244.16297-7-pbonzini@redhat.com | ||
12 | Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> | ||
13 | --- | ||
14 | include/qemu/coroutine.h | 3 ++- | ||
15 | util/qemu-coroutine-lock.c | 35 ++++++++++++++++++++++++----------- | ||
16 | 2 files changed, 26 insertions(+), 12 deletions(-) | ||
17 | |||
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -XXX,XX +XXX,XX @@ bool qemu_co_queue_empty(CoQueue *queue);


 typedef struct CoRwlock {
-    bool writer;
+    int pending_writer;
     int reader;
+    CoMutex mutex;
     CoQueue queue;
 } CoRwlock;

diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index XXXXXXX..XXXXXXX 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_init(CoRwlock *lock)
 {
     memset(lock, 0, sizeof(*lock));
     qemu_co_queue_init(&lock->queue);
+    qemu_co_mutex_init(&lock->mutex);
 }

 void qemu_co_rwlock_rdlock(CoRwlock *lock)
 {
     Coroutine *self = qemu_coroutine_self();

-    while (lock->writer) {
-        qemu_co_queue_wait(&lock->queue, NULL);
+    qemu_co_mutex_lock(&lock->mutex);
+    /* For fairness, wait if a writer is in line. */
+    while (lock->pending_writer) {
+        qemu_co_queue_wait(&lock->queue, &lock->mutex);
     }
     lock->reader++;
+    qemu_co_mutex_unlock(&lock->mutex);
+
+    /* The rest of the read-side critical section is run without the mutex. */
     self->locks_held++;
 }

@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
     Coroutine *self = qemu_coroutine_self();

     assert(qemu_in_coroutine());
-    if (lock->writer) {
-        lock->writer = false;
+    if (!lock->reader) {
+        /* The critical section started in qemu_co_rwlock_wrlock. */
         qemu_co_queue_restart_all(&lock->queue);
     } else {
+        self->locks_held--;
+
+        qemu_co_mutex_lock(&lock->mutex);
         lock->reader--;
         assert(lock->reader >= 0);
         /* Wakeup only one waiting writer */
@@ -XXX,XX +XXX,XX @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
             qemu_co_queue_next(&lock->queue);
         }
     }
-    self->locks_held--;
+    qemu_co_mutex_unlock(&lock->mutex);
 }

 void qemu_co_rwlock_wrlock(CoRwlock *lock)
 {
-    Coroutine *self = qemu_coroutine_self();
-
-    while (lock->writer || lock->reader) {
-        qemu_co_queue_wait(&lock->queue, NULL);
+    qemu_co_mutex_lock(&lock->mutex);
+    lock->pending_writer++;
+    while (lock->reader) {
+        qemu_co_queue_wait(&lock->queue, &lock->mutex);
     }
-    lock->writer = true;
-    self->locks_held++;
+    lock->pending_writer--;
+
+    /* The rest of the write-side critical section is run with
+     * the mutex taken, so that lock->reader remains zero.
+     * There is no need to update self->locks_held.
+     */
 }
-- 
2.9.3
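
For reference, the lock is used like this from coroutine context. A minimal
usage sketch with the real qemu_co_rwlock_* API (setup and coroutine
scheduling omitted):

    static CoRwlock lock;   /* initialised once with qemu_co_rwlock_init() */

    static void coroutine_fn reader(void *opaque)
    {
        qemu_co_rwlock_rdlock(&lock);
        /* read shared state; other readers may hold the lock concurrently */
        qemu_co_rwlock_unlock(&lock);
    }

    static void coroutine_fn writer(void *opaque)
    {
        qemu_co_rwlock_wrlock(&lock);
        /* exclusive access; new readers now queue behind pending writers */
        qemu_co_rwlock_unlock(&lock);
    }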