The following changes since commit 171199f56f5f9bdf1e5d670d09ef1351d8f01bae:

  Merge remote-tracking branch 'remotes/alistair/tags/pull-riscv-to-apply-20200619-3' into staging (2020-06-22 14:45:25 +0100)

are available in the Git repository at:

  https://github.com/stefanha/qemu.git tags/block-pull-request

for you to fetch changes up to 7838c67f22a81fcf669785cd6c0876438422071a:

  block/nvme: support nested aio_poll() (2020-06-23 15:46:08 +0100)

----------------------------------------------------------------
Pull request

----------------------------------------------------------------

Daniele Buono (4):
  coroutine: support SafeStack in ucontext backend
  coroutine: add check for SafeStack in sigaltstack
  configure: add flags to support SafeStack
  check-block: enable iotests with SafeStack

Stefan Hajnoczi (8):
  minikconf: explicitly set encoding to UTF-8
  block/nvme: poll queues without q->lock
  block/nvme: drop tautologous assertion
  block/nvme: don't access CQE after moving cq.head
  block/nvme: switch to a NVMeRequest freelist
  block/nvme: clarify that free_req_queue is protected by q->lock
  block/nvme: keep BDRVNVMeState pointer in NVMeQueuePair
  block/nvme: support nested aio_poll()

 configure                    |  73 ++++++++++++
 include/qemu/coroutine_int.h |   5 +
 block/nvme.c                 | 220 +++++++++++++++++++++++++----------
 util/coroutine-sigaltstack.c |   4 +
 util/coroutine-ucontext.c    |  28 +++++
 block/trace-events           |   2 +-
 scripts/minikconf.py         |   6 +-
 tests/check-block.sh         |  12 +-
 8 files changed, 284 insertions(+), 66 deletions(-)

-- 
2.26.2

----------------------------------------------------------------
minikconf: explicitly set encoding to UTF-8

QEMU currently only has ASCII Kconfig files but Linux actually uses
UTF-8. Explicitly specify the encoding and that we're doing text file
I/O.

It's unclear whether or not QEMU will ever need Unicode in its Kconfig
files. If we start using the help text then it will become an issue
sooner or later. Make this change now for consistency with Linux
Kconfig.

Reported-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200521153616.307100-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 scripts/minikconf.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/scripts/minikconf.py b/scripts/minikconf.py
index XXXXXXX..XXXXXXX 100755
--- a/scripts/minikconf.py
+++ b/scripts/minikconf.py
@@ -XXX,XX +XXX,XX @@ class KconfigParser:
         if incl_abs_fname in self.data.previously_included:
             return
         try:
-            fp = open(incl_abs_fname, 'r')
+            fp = open(incl_abs_fname, 'rt', encoding='utf-8')
         except IOError as e:
             raise KconfigParserError(self,
                 '%s: %s' % (e.strerror, include))
@@ -XXX,XX +XXX,XX @@ if __name__ == '__main__':
             parser.do_assignment(name, value == 'y')
             external_vars.add(name[7:])
         else:
-            fp = open(arg, 'r')
+            fp = open(arg, 'rt', encoding='utf-8')
             parser.parse_file(fp)
             fp.close()

@@ -XXX,XX +XXX,XX @@ if __name__ == '__main__':
         if key not in external_vars and config[key]:
             print ('CONFIG_%s=y' % key)

-    deps = open(argv[2], 'w')
+    deps = open(argv[2], 'wt', encoding='utf-8')
     for fname in data.previously_included:
         print ('%s: %s' % (argv[1], fname), file=deps)
     deps.close()
-- 
2.26.2

----------------------------------------------------------------
coroutine: support SafeStack in ucontext backend

From: Daniele Buono <dbuono@linux.vnet.ibm.com>

LLVM's SafeStack instrumentation does not yet support programs that make
use of the APIs in ucontext.h. With the current implementation of
coroutine-ucontext, the resulting binary is incorrect: different
coroutines share the same unsafe stack and produce undefined behavior at
runtime.

This fix allocates an additional unsafe stack area for each coroutine,
and sets the new unsafe stack pointer before calling swapcontext() in
qemu_coroutine_new. This is the only place where the pointer needs to be
updated manually, since sigsetjmp/siglongjmp are already instrumented by
LLVM to properly support SafeStack. The additional stack is then freed
in qemu_coroutine_delete.
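
A minimal sketch of the idea outside QEMU (assumes clang with
-fsanitize=safe-stack; only __safestack_unsafe_stack_ptr comes from the
SafeStack runtime, the other names are invented for illustration):

    #include <ucontext.h>
    #include <stddef.h>

    /* Thread-local unsafe stack pointer maintained by the compiler. */
    extern __thread void *__safestack_unsafe_stack_ptr;

    typedef struct {
        ucontext_t uc;
        void *unsafe_stack;          /* second, per-coroutine stack */
        size_t unsafe_stack_size;
    } Coro;

    static void coro_enter(Coro *co, ucontext_t *caller)
    {
        /* Both stacks grow downward, so start at the top of the area. */
        __safestack_unsafe_stack_ptr =
            (char *)co->unsafe_stack + co->unsafe_stack_size;
        swapcontext(caller, &co->uc);
    }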

Signed-off-by: Daniele Buono <dbuono@linux.vnet.ibm.com>
Message-id: 20200529205122.714-2-dbuono@linux.vnet.ibm.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 include/qemu/coroutine_int.h |  5 +++++
 util/coroutine-ucontext.c    | 28 ++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+)

diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/coroutine_int.h
+++ b/include/qemu/coroutine_int.h
@@ -XXX,XX +XXX,XX @@
 #include "qemu/queue.h"
 #include "qemu/coroutine.h"

+#ifdef CONFIG_SAFESTACK
+/* Pointer to the unsafe stack, defined by the compiler */
+extern __thread void *__safestack_unsafe_stack_ptr;
+#endif
+
 #define COROUTINE_STACK_SIZE (1 << 20)

 typedef enum {
diff --git a/util/coroutine-ucontext.c b/util/coroutine-ucontext.c
index XXXXXXX..XXXXXXX 100644
--- a/util/coroutine-ucontext.c
+++ b/util/coroutine-ucontext.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
     Coroutine base;
     void *stack;
     size_t stack_size;
+#ifdef CONFIG_SAFESTACK
+    /* Need an unsafe stack for each coroutine */
+    void *unsafe_stack;
+    size_t unsafe_stack_size;
+#endif
     sigjmp_buf env;

     void *tsan_co_fiber;
@@ -XXX,XX +XXX,XX @@ Coroutine *qemu_coroutine_new(void)
     co = g_malloc0(sizeof(*co));
     co->stack_size = COROUTINE_STACK_SIZE;
     co->stack = qemu_alloc_stack(&co->stack_size);
+#ifdef CONFIG_SAFESTACK
+    co->unsafe_stack_size = COROUTINE_STACK_SIZE;
+    co->unsafe_stack = qemu_alloc_stack(&co->unsafe_stack_size);
+#endif
    co->base.entry_arg = &old_env; /* stash away our jmp_buf */

     uc.uc_link = &old_uc;
@@ -XXX,XX +XXX,XX @@ Coroutine *qemu_coroutine_new(void)
                            COROUTINE_YIELD,
                            &fake_stack_save,
                            co->stack, co->stack_size, co->tsan_co_fiber);
+
+#ifdef CONFIG_SAFESTACK
+        /*
+         * Before we swap the context, set the new unsafe stack
+         * The unsafe stack grows just like the normal stack, so start from
+         * the last usable location of the memory area.
+         * NOTE: we don't have to re-set the usp afterwards because we are
+         * coming back to this context through a siglongjmp.
+         * The compiler already wrapped the corresponding sigsetjmp call with
+         * code that saves the usp on the (safe) stack before the call, and
+         * restores it right after (which is where we return with siglongjmp).
+         */
+        void *usp = co->unsafe_stack + co->unsafe_stack_size;
+        __safestack_unsafe_stack_ptr = usp;
+#endif
+
         swapcontext(&old_uc, &uc);
 }

@@ -XXX,XX +XXX,XX @@ void qemu_coroutine_delete(Coroutine *co_)
 #endif

     qemu_free_stack(co->stack, co->stack_size);
+#ifdef CONFIG_SAFESTACK
+    qemu_free_stack(co->unsafe_stack, co->unsafe_stack_size);
+#endif
     g_free(co);
 }

-- 
2.26.2

----------------------------------------------------------------
coroutine: add check for SafeStack in sigaltstack

From: Daniele Buono <dbuono@linux.vnet.ibm.com>

The current implementation of LLVM's SafeStack is not compatible with
code that uses an alternate stack created with sigaltstack(). Since
coroutine-sigaltstack relies on sigaltstack(), it is not compatible
with SafeStack. The resulting binary is incorrect, with different
coroutines sharing the same unsafe stack and producing undefined
behavior at runtime.

In the future LLVM may provide a SafeStack implementation compatible
with sigaltstack(). In the meantime, if SafeStack is desired, the
coroutine implementation from coroutine-ucontext should be used. As a
safety check, add a guard in coroutine-sigaltstack that raises a
preprocessor #error if SafeStack is enabled while coroutine-sigaltstack
is being used to implement coroutines.

Signed-off-by: Daniele Buono <dbuono@linux.vnet.ibm.com>
Message-id: 20200529205122.714-3-dbuono@linux.vnet.ibm.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 util/coroutine-sigaltstack.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/util/coroutine-sigaltstack.c b/util/coroutine-sigaltstack.c
index XXXXXXX..XXXXXXX 100644
--- a/util/coroutine-sigaltstack.c
+++ b/util/coroutine-sigaltstack.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu-common.h"
 #include "qemu/coroutine_int.h"

+#ifdef CONFIG_SAFESTACK
+#error "SafeStack is not compatible with code run in alternate signal stacks"
+#endif
+
 typedef struct {
     Coroutine base;
     void *stack;
-- 
2.26.2

----------------------------------------------------------------
configure: add flags to support SafeStack

From: Daniele Buono <dbuono@linux.vnet.ibm.com>

This patch adds a flag to enable/disable the SafeStack instrumentation
provided by LLVM.

On enable, make sure that the compiler supports the flags and that we
are using the proper coroutine implementation (coroutine-ucontext). On
disable, explicitly disable the option if it was enabled by default.

While SafeStack is supported only on Linux, NetBSD, FreeBSD and macOS,
we do not check for the OS, since this is already done by LLVM.

Signed-off-by: Daniele Buono <dbuono@linux.vnet.ibm.com>
Message-id: 20200529205122.714-4-dbuono@linux.vnet.ibm.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 configure | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ audio_win_int=""
 libs_qga=""
 debug_info="yes"
 stack_protector=""
+safe_stack=""
 use_containers="yes"
 gdb_bin=$(command -v "gdb-multiarch" || command -v "gdb")

@@ -XXX,XX +XXX,XX @@ for opt do
   ;;
   --disable-stack-protector) stack_protector="no"
   ;;
+  --enable-safe-stack) safe_stack="yes"
+  ;;
+  --disable-safe-stack) safe_stack="no"
+  ;;
   --disable-curses) curses="no"
   ;;
   --enable-curses) curses="yes"
@@ -XXX,XX +XXX,XX @@ disabled with --disable-FEATURE, default is enabled if available:
   debug-tcg       TCG debugging (default is disabled)
   debug-info      debugging information
   sparse          sparse checker
+  safe-stack      SafeStack Stack Smash Protection. Depends on
+                  clang/llvm >= 3.7 and requires coroutine backend ucontext.

   gnutls          GNUTLS cryptography support
   nettle          nettle cryptography support
@@ -XXX,XX +XXX,XX @@ if test "$debug_stack_usage" = "yes"; then
   fi
 fi

+##################################################
+# SafeStack
+
+
+if test "$safe_stack" = "yes"; then
+cat > $TMPC << EOF
+int main(int argc, char *argv[])
+{
+#if ! __has_feature(safe_stack)
+#error SafeStack Disabled
+#endif
+    return 0;
+}
+EOF
+  flag="-fsanitize=safe-stack"
+  # Check that safe-stack is supported and enabled.
+  if compile_prog "-Werror $flag" "$flag"; then
+    # Flag needed both at compilation and at linking
+    QEMU_CFLAGS="$QEMU_CFLAGS $flag"
+    QEMU_LDFLAGS="$QEMU_LDFLAGS $flag"
+  else
+    error_exit "SafeStack not supported by your compiler"
+  fi
+  if test "$coroutine" != "ucontext"; then
+    error_exit "SafeStack is only supported by the coroutine backend ucontext"
+  fi
+else
+cat > $TMPC << EOF
+int main(int argc, char *argv[])
+{
+#if defined(__has_feature)
+#if __has_feature(safe_stack)
+#error SafeStack Enabled
+#endif
+#endif
+    return 0;
+}
+EOF
+if test "$safe_stack" = "no"; then
+  # Make sure that safe-stack is disabled
+  if ! compile_prog "-Werror" ""; then
+    # SafeStack was already enabled, try to explicitly remove the feature
+    flag="-fno-sanitize=safe-stack"
+    if ! compile_prog "-Werror $flag" "$flag"; then
+      error_exit "Configure cannot disable SafeStack"
+    fi
+    QEMU_CFLAGS="$QEMU_CFLAGS $flag"
+    QEMU_LDFLAGS="$QEMU_LDFLAGS $flag"
+  fi
+else # "$safe_stack" = ""
+  # Set safe_stack to yes or no based on pre-existing flags
+  if compile_prog "-Werror" ""; then
+    safe_stack="no"
+  else
+    safe_stack="yes"
+    if test "$coroutine" != "ucontext"; then
+      error_exit "SafeStack is only supported by the coroutine backend ucontext"
+    fi
+  fi
+fi
+fi

 ##########################################
 # check if we have open_by_handle_at
@@ -XXX,XX +XXX,XX @@ echo "sparse enabled    $sparse"
 echo "strip binaries    $strip_opt"
 echo "profiler          $profiler"
 echo "static build      $static"
+echo "safe stack        $safe_stack"
 if test "$darwin" = "yes" ; then
     echo "Cocoa support     $cocoa"
 fi
@@ -XXX,XX +XXX,XX @@ if test "$ccache_cpp2" = "yes"; then
   echo "export CCACHE_CPP2=y" >> $config_host_mak
 fi

+if test "$safe_stack" = "yes"; then
+  echo "CONFIG_SAFESTACK=y" >> $config_host_mak
+fi
+
 # If we're using a separate build tree, set it up now.
 # DIRS are directories which we simply mkdir in the build tree;
 # LINKS are things to symlink back into the source tree
-- 
2.26.2

----------------------------------------------------------------
check-block: enable iotests with SafeStack

From: Daniele Buono <dbuono@linux.vnet.ibm.com>

SafeStack is a stack protection technique implemented in LLVM. It is
enabled with a -fsanitize flag. iotests are currently disabled when any
-fsanitize option is used, because such options tend to produce
additional warnings and false positives.

While common -fsanitize options are used to verify the code and are not
included in production builds, SafeStack's main use is in production
environments to protect against stack smashing.

Since SafeStack does not print any warnings or false positives, enable
iotests when SafeStack is the only -fsanitize option used. Such a build
is likely to be a production binary and we want to make sure it works
correctly.

Signed-off-by: Daniele Buono <dbuono@linux.vnet.ibm.com>
Message-id: 20200529205122.714-5-dbuono@linux.vnet.ibm.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 tests/check-block.sh | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/tests/check-block.sh b/tests/check-block.sh
index XXXXXXX..XXXXXXX 100755
--- a/tests/check-block.sh
+++ b/tests/check-block.sh
@@ -XXX,XX +XXX,XX @@ if grep -q "CONFIG_GPROF=y" config-host.mak 2>/dev/null ; then
     exit 0
 fi

-if grep -q "CFLAGS.*-fsanitize" config-host.mak 2>/dev/null ; then
+# Disable tests with any sanitizer except for SafeStack
+CFLAGS=$( grep "CFLAGS.*-fsanitize" config-host.mak 2>/dev/null )
+SANITIZE_FLAGS=""
+# Remove all occurrences of -fsanitize=safe-stack
+for i in ${CFLAGS}; do
+    if [ "${i}" != "-fsanitize=safe-stack" ]; then
+        SANITIZE_FLAGS="${SANITIZE_FLAGS} ${i}"
+    fi
+done
+if echo ${SANITIZE_FLAGS} | grep -q "\-fsanitize" 2>/dev/null; then
+    # Have a sanitize flag that is not allowed, stop
     echo "Sanitizers are enabled ==> Not running the qemu-iotests."
     exit 0
 fi
-- 
2.26.2

----------------------------------------------------------------
block/nvme: poll queues without q->lock

A lot of CPU time is spent simply locking/unlocking q->lock during
polling. Check for completion outside the lock to make q->lock disappear
from the profile.
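
For background, the lock-free check relies on the NVMe completion queue
phase bit: the controller flips the bit each time it passes through the
ring, so a single read tells us whether the head entry is new. A
standalone sketch (simplified types, byte-swapping omitted, names
invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    struct cqe { uint16_t status; };   /* low bit holds the phase tag */

    /* stale_phase is the phase value of already-consumed entries. */
    static bool cq_has_new_entry(const struct cqe *ring, unsigned head,
                                 unsigned stale_phase)
    {
        return (ring[head].status & 0x1) != stale_phase;
    }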

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20200617132201.1832152-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static bool nvme_poll_queues(BDRVNVMeState *s)

     for (i = 0; i < s->nr_queues; i++) {
         NVMeQueuePair *q = s->queues[i];
+        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+        /*
+         * Do an early check for completions. q->lock isn't needed because
+         * nvme_process_completion() only runs in the event loop thread and
+         * cannot race with itself.
+         */
+        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
+            continue;
+        }
+
         qemu_mutex_lock(&q->lock);
         while (nvme_process_completion(s, q)) {
             /* Keep polling */
-- 
2.26.2

----------------------------------------------------------------
block/nvme: drop tautologous assertion

nvme_process_completion() explicitly checks cid so the assertion that
follows is always true:

    if (cid == 0 || cid > NVME_QUEUE_SIZE) {
        ...
        continue;
    }
    assert(cid <= NVME_QUEUE_SIZE);

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200617132201.1832152-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
                     cid);
             continue;
         }
-        assert(cid <= NVME_QUEUE_SIZE);
         trace_nvme_complete_command(s, q->index, cid);
         preq = &q->reqs[cid - 1];
         req = *preq;
-- 
2.26.2

----------------------------------------------------------------
block/nvme: don't access CQE after moving cq.head

Do not access a CQE after incrementing q->cq.head and releasing q->lock.
It is unlikely that this causes problems in practice, but it is a latent
bug.

The reason why it is currently safe is that completion processing is not
re-entrant and the CQ doorbell isn't written until the end of
nvme_process_completion().

Make this change now because QEMU expects completion processing to be
re-entrant, and later patches will make it so.
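
The general rule, as a standalone sketch (simplified, invented types):
copy what the callback needs out of the ring entry before advancing the
head, because afterwards the slot may be overwritten at any time.

    typedef struct { int status; } Entry;

    static int consume(Entry *ring, unsigned size, unsigned *head)
    {
        int status = ring[*head].status;   /* read before advancing */
        *head = (*head + 1) % size;        /* slot is now reusable */
        return status;                     /* caller uses the copy */
    }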

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200617132201.1832152-4-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
     q->busy = true;
     assert(q->inflight >= 0);
     while (q->inflight) {
+        int ret;
         int16_t cid;
+
         c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
         if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
             break;
         }
+        ret = nvme_translate_error(c);
         q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
         if (!q->cq.head) {
             q->cq_phase = !q->cq_phase;
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
         preq->busy = false;
         preq->cb = preq->opaque = NULL;
         qemu_mutex_unlock(&q->lock);
-        req.cb(req.opaque, nvme_translate_error(c));
+        req.cb(req.opaque, ret);
         qemu_mutex_lock(&q->lock);
         q->inflight--;
         progress = true;
-- 
2.26.2

----------------------------------------------------------------
block/nvme: switch to a NVMeRequest freelist

There are three issues with the current NVMeRequest->busy field:

1. The busy field is accidentally accessed outside q->lock when request
   submission fails.
2. Waiters on free_req_queue are not woken when a request is returned
   early due to submission failure.
3. Finding a free request involves scanning all requests. This makes
   request submission O(n^2).

Switch to an O(1) freelist that is always accessed under the lock.

Also differentiate between NVME_QUEUE_SIZE, the actual SQ/CQ size, and
NVME_NUM_REQS, the number of usable requests. This makes the code
simpler than using NVME_QUEUE_SIZE everywhere and having to keep in
mind that one slot is reserved.
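
A standalone sketch of the freelist technique (field and macro names
mirror the patch; locking and the CoQueue wait path are omitted):

    #define NVME_QUEUE_SIZE 128
    #define NVME_NUM_REQS   (NVME_QUEUE_SIZE - 1)   /* one slot reserved */

    typedef struct {
        int free_req_next;   /* reqs[] index of next free req, -1 if none */
    } NVMeRequest;

    static NVMeRequest reqs[NVME_NUM_REQS];
    static int free_req_head = -1;

    static void freelist_init(void)
    {
        for (int i = 0; i < NVME_NUM_REQS; i++) {
            reqs[i].free_req_next = free_req_head;
            free_req_head = i;
        }
    }

    static NVMeRequest *freelist_get(void)   /* O(1) */
    {
        if (free_req_head == -1) {
            return NULL;   /* caller waits on free_req_queue instead */
        }
        NVMeRequest *req = &reqs[free_req_head];
        free_req_head = req->free_req_next;
        req->free_req_next = -1;
        return req;
    }

    static void freelist_put(NVMeRequest *req)   /* O(1) */
    {
        req->free_req_next = free_req_head;
        free_req_head = req - reqs;
    }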

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20200617132201.1832152-5-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 81 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 54 insertions(+), 27 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@
 #define NVME_QUEUE_SIZE 128
 #define NVME_BAR_SIZE 8192

+/*
+ * We have to leave one slot empty as that is the full queue case where
+ * head == tail + 1.
+ */
+#define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)
+
 typedef struct {
     int32_t head, tail;
     uint8_t *queue;
@@ -XXX,XX +XXX,XX @@ typedef struct {
     int cid;
     void *prp_list_page;
     uint64_t prp_list_iova;
-    bool busy;
+    int free_req_next; /* q->reqs[] index of next free req */
 } NVMeRequest;

 typedef struct {
@@ -XXX,XX +XXX,XX @@ typedef struct {
     /* Fields protected by @lock */
     NVMeQueue sq, cq;
     int cq_phase;
-    NVMeRequest reqs[NVME_QUEUE_SIZE];
+    int free_req_head;
+    NVMeRequest reqs[NVME_NUM_REQS];
     bool busy;
     int need_kick;
     int inflight;
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
     qemu_mutex_init(&q->lock);
     q->index = idx;
     qemu_co_queue_init(&q->free_req_queue);
-    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_QUEUE_SIZE);
+    q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
-                          s->page_size * NVME_QUEUE_SIZE,
+                          s->page_size * NVME_NUM_REQS,
                           false, &prp_list_iova);
     if (r) {
         goto fail;
     }
-    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
+    q->free_req_head = -1;
+    for (i = 0; i < NVME_NUM_REQS; i++) {
         NVMeRequest *req = &q->reqs[i];
         req->cid = i + 1;
+        req->free_req_next = q->free_req_head;
+        q->free_req_head = i;
         req->prp_list_page = q->prp_list_pages + i * s->page_size;
         req->prp_list_iova = prp_list_iova + i * s->page_size;
     }
+
     nvme_init_queue(bs, &q->sq, size, NVME_SQ_ENTRY_BYTES, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
@@ -XXX,XX +XXX,XX @@ static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
  */
 static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
 {
-    int i;
-    NVMeRequest *req = NULL;
+    NVMeRequest *req;

     qemu_mutex_lock(&q->lock);
-    while (q->inflight + q->need_kick > NVME_QUEUE_SIZE - 2) {
-        /* We have to leave one slot empty as that is the full queue case (head
-         * == tail + 1). */
+
+    while (q->free_req_head == -1) {
         if (qemu_in_coroutine()) {
             trace_nvme_free_req_queue_wait(q);
             qemu_co_queue_wait(&q->free_req_queue, &q->lock);
@@ -XXX,XX +XXX,XX @@ static NVMeRequest *nvme_get_free_req(NVMeQueuePair *q)
             return NULL;
         }
     }
-    for (i = 0; i < NVME_QUEUE_SIZE; i++) {
-        if (!q->reqs[i].busy) {
-            q->reqs[i].busy = true;
-            req = &q->reqs[i];
-            break;
-        }
-    }
-    /* We have checked inflight and need_kick while holding q->lock, so one
-     * free req must be available. */
-    assert(req);
+
+    req = &q->reqs[q->free_req_head];
+    q->free_req_head = req->free_req_next;
+    req->free_req_next = -1;
+
     qemu_mutex_unlock(&q->lock);
     return req;
 }

+/* With q->lock */
+static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
+{
+    req->free_req_next = q->free_req_head;
+    q->free_req_head = req - q->reqs;
+}
+
+/* With q->lock */
+static void nvme_wake_free_req_locked(BDRVNVMeState *s, NVMeQueuePair *q)
+{
+    if (!qemu_co_queue_empty(&q->free_req_queue)) {
+        replay_bh_schedule_oneshot_event(s->aio_context,
+                                         nvme_free_req_queue_cb, q);
+    }
+}
+
+/* Insert a request in the freelist and wake waiters */
+static void nvme_put_free_req_and_wake(BDRVNVMeState *s, NVMeQueuePair *q,
+                                       NVMeRequest *req)
+{
+    qemu_mutex_lock(&q->lock);
+    nvme_put_free_req_locked(q, req);
+    nvme_wake_free_req_locked(s, q);
+    qemu_mutex_unlock(&q->lock);
+}
+
 static inline int nvme_translate_error(const NvmeCqe *c)
 {
     uint16_t status = (le16_to_cpu(c->status) >> 1) & 0xFF;
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
         req = *preq;
         assert(req.cid == cid);
         assert(req.cb);
-        preq->busy = false;
+        nvme_put_free_req_locked(q, preq);
         preq->cb = preq->opaque = NULL;
         qemu_mutex_unlock(&q->lock);
         req.cb(req.opaque, ret);
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
         /* Notify the device so it can post more completions. */
         smp_mb_release();
         *q->cq.doorbell = cpu_to_le32(q->cq.head);
-        if (!qemu_co_queue_empty(&q->free_req_queue)) {
-            replay_bh_schedule_oneshot_event(s->aio_context,
-                                             nvme_free_req_queue_cb, q);
-        }
+        nvme_wake_free_req_locked(s, q);
     }
     q->busy = false;
     return progress;
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
     r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
     qemu_co_mutex_unlock(&s->dma_map_lock);
     if (r) {
-        req->busy = false;
+        nvme_put_free_req_and_wake(s, ioq, req);
         return r;
     }
     nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     qemu_co_mutex_unlock(&s->dma_map_lock);

     if (ret) {
-        req->busy = false;
+        nvme_put_free_req_and_wake(s, ioq, req);
         goto out;
     }

-- 
2.26.2

----------------------------------------------------------------
block/nvme: clarify that free_req_queue is protected by q->lock

Existing users access free_req_queue under q->lock. Document this.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200617132201.1832152-6-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
 } NVMeRequest;

 typedef struct {
-    CoQueue free_req_queue;
     QemuMutex lock;

     /* Fields protected by BQL */
@@ -XXX,XX +XXX,XX @@ typedef struct {
     uint8_t *prp_list_pages;

     /* Fields protected by @lock */
+    CoQueue free_req_queue;
     NVMeQueue sq, cq;
     int cq_phase;
     int free_req_head;
-- 
2.26.2

----------------------------------------------------------------
block/nvme: keep BDRVNVMeState pointer in NVMeQueuePair

Passing around both BDRVNVMeState and NVMeQueuePair is unwieldy. Reduce
the number of function arguments by keeping the BDRVNVMeState pointer in
NVMeQueuePair. This will come in handy when a BH is introduced in a
later patch, since only one argument can be passed to a BH.
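
For background: a QEMU bottom half callback receives a single opaque
pointer, so everything it needs must be reachable from one struct. A
sketch with simplified stand-in types (the callback name is invented):

    typedef struct BDRVNVMeState BDRVNVMeState;

    typedef struct {
        BDRVNVMeState *s;   /* back-pointer added by this patch */
        /* ... */
    } NVMeQueuePair;

    static void nvme_example_bh(void *opaque)
    {
        NVMeQueuePair *q = opaque;
        BDRVNVMeState *s = q->s;   /* both reachable from one pointer */
        (void)s;
    }

    /* created with: aio_bh_new(ctx, nvme_example_bh, q) */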

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20200617132201.1832152-7-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c | 70 ++++++++++++++++++++++++++++------------------------
 1 file changed, 38 insertions(+), 32 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@
  */
 #define NVME_NUM_REQS (NVME_QUEUE_SIZE - 1)

+typedef struct BDRVNVMeState BDRVNVMeState;
+
 typedef struct {
     int32_t head, tail;
     uint8_t *queue;
@@ -XXX,XX +XXX,XX @@ typedef struct {
 typedef struct {
     QemuMutex lock;

+    /* Read from I/O code path, initialized under BQL */
+    BDRVNVMeState *s;
+    int index;
+
     /* Fields protected by BQL */
-    int index;
     uint8_t *prp_list_pages;

     /* Fields protected by @lock */
@@ -XXX,XX +XXX,XX @@ typedef volatile struct {

 QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);

-typedef struct {
+struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
     NVMeRegs *regs;
@@ -XXX,XX +XXX,XX @@ typedef struct {

     /* PCI address (required for nvme_refresh_filename()) */
     char *device;
-} BDRVNVMeState;
+};

 #define NVME_BLOCK_OPT_DEVICE "device"
 #define NVME_BLOCK_OPT_NAMESPACE "namespace"
@@ -XXX,XX +XXX,XX @@ static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
     }
 }

-static void nvme_free_queue_pair(BlockDriverState *bs, NVMeQueuePair *q)
+static void nvme_free_queue_pair(NVMeQueuePair *q)
 {
     qemu_vfree(q->prp_list_pages);
     qemu_vfree(q->sq.queue);
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
     uint64_t prp_list_iova;

     qemu_mutex_init(&q->lock);
+    q->s = s;
     q->index = idx;
     qemu_co_queue_init(&q->free_req_queue);
     q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,

     return q;
 fail:
-    nvme_free_queue_pair(bs, q);
+    nvme_free_queue_pair(q);
     return NULL;
 }

 /* With q->lock */
-static void nvme_kick(BDRVNVMeState *s, NVMeQueuePair *q)
+static void nvme_kick(NVMeQueuePair *q)
 {
+    BDRVNVMeState *s = q->s;
+
     if (s->plugged || !q->need_kick) {
         return;
     }
@@ -XXX,XX +XXX,XX @@ static void nvme_put_free_req_locked(NVMeQueuePair *q, NVMeRequest *req)
 }

 /* With q->lock */
-static void nvme_wake_free_req_locked(BDRVNVMeState *s, NVMeQueuePair *q)
+static void nvme_wake_free_req_locked(NVMeQueuePair *q)
 {
     if (!qemu_co_queue_empty(&q->free_req_queue)) {
-        replay_bh_schedule_oneshot_event(s->aio_context,
+        replay_bh_schedule_oneshot_event(q->s->aio_context,
                                          nvme_free_req_queue_cb, q);
     }
 }

 /* Insert a request in the freelist and wake waiters */
-static void nvme_put_free_req_and_wake(BDRVNVMeState *s, NVMeQueuePair *q,
-                                       NVMeRequest *req)
+static void nvme_put_free_req_and_wake(NVMeQueuePair *q, NVMeRequest *req)
 {
     qemu_mutex_lock(&q->lock);
     nvme_put_free_req_locked(q, req);
-    nvme_wake_free_req_locked(s, q);
+    nvme_wake_free_req_locked(q);
     qemu_mutex_unlock(&q->lock);
 }

@@ -XXX,XX +XXX,XX @@ static inline int nvme_translate_error(const NvmeCqe *c)
 }

 /* With q->lock */
-static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
+static bool nvme_process_completion(NVMeQueuePair *q)
 {
+    BDRVNVMeState *s = q->s;
     bool progress = false;
     NVMeRequest *preq;
     NVMeRequest req;
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(BDRVNVMeState *s, NVMeQueuePair *q)
         /* Notify the device so it can post more completions. */
         smp_mb_release();
         *q->cq.doorbell = cpu_to_le32(q->cq.head);
-        nvme_wake_free_req_locked(s, q);
+        nvme_wake_free_req_locked(q);
     }
     q->busy = false;
     return progress;
@@ -XXX,XX +XXX,XX @@ static void nvme_trace_command(const NvmeCmd *cmd)
     }
 }

-static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
-                                NVMeRequest *req,
+static void nvme_submit_command(NVMeQueuePair *q, NVMeRequest *req,
                                 NvmeCmd *cmd, BlockCompletionFunc cb,
                                 void *opaque)
 {
@@ -XXX,XX +XXX,XX @@ static void nvme_submit_command(BDRVNVMeState *s, NVMeQueuePair *q,
     req->opaque = opaque;
     cmd->cid = cpu_to_le32(req->cid);

-    trace_nvme_submit_command(s, q->index, req->cid);
+    trace_nvme_submit_command(q->s, q->index, req->cid);
     nvme_trace_command(cmd);
     qemu_mutex_lock(&q->lock);
     memcpy((uint8_t *)q->sq.queue +
            q->sq.tail * NVME_SQ_ENTRY_BYTES, cmd, sizeof(*cmd));
     q->sq.tail = (q->sq.tail + 1) % NVME_QUEUE_SIZE;
     q->need_kick++;
-    nvme_kick(s, q);
-    nvme_process_completion(s, q);
+    nvme_kick(q);
+    nvme_process_completion(q);
     qemu_mutex_unlock(&q->lock);
 }

@@ -XXX,XX +XXX,XX @@ static int nvme_cmd_sync(BlockDriverState *bs, NVMeQueuePair *q,
                          NvmeCmd *cmd)
 {
     NVMeRequest *req;
-    BDRVNVMeState *s = bs->opaque;
     int ret = -EINPROGRESS;
     req = nvme_get_free_req(q);
     if (!req) {
         return -EBUSY;
     }
-    nvme_submit_command(s, q, req, cmd, nvme_cmd_sync_cb, &ret);
+    nvme_submit_command(q, req, cmd, nvme_cmd_sync_cb, &ret);

     BDRV_POLL_WHILE(bs, ret == -EINPROGRESS);
     return ret;
@@ -XXX,XX +XXX,XX @@ static bool nvme_poll_queues(BDRVNVMeState *s)
     }

     qemu_mutex_lock(&q->lock);
-    while (nvme_process_completion(s, q)) {
+    while (nvme_process_completion(q)) {
         /* Keep polling */
         progress = true;
     }
@@ -XXX,XX +XXX,XX @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
     };
     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
         error_setg(errp, "Failed to create io queue [%d]", n);
-        nvme_free_queue_pair(bs, q);
+        nvme_free_queue_pair(q);
         return false;
     }
     cmd = (NvmeCmd) {
@@ -XXX,XX +XXX,XX @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
     };
     if (nvme_cmd_sync(bs, s->queues[0], &cmd)) {
         error_setg(errp, "Failed to create io queue [%d]", n);
-        nvme_free_queue_pair(bs, q);
+        nvme_free_queue_pair(q);
         return false;
     }
     s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
@@ -XXX,XX +XXX,XX @@ static void nvme_close(BlockDriverState *bs)
     BDRVNVMeState *s = bs->opaque;

     for (i = 0; i < s->nr_queues; ++i) {
-        nvme_free_queue_pair(bs, s->queues[i]);
+        nvme_free_queue_pair(s->queues[i]);
     }
     g_free(s->queues);
     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
     r = nvme_cmd_map_qiov(bs, &cmd, req, qiov);
     qemu_co_mutex_unlock(&s->dma_map_lock);
     if (r) {
-        nvme_put_free_req_and_wake(s, ioq, req);
+        nvme_put_free_req_and_wake(ioq, req);
         return r;
     }
-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

     data.co = qemu_coroutine_self();
     while (data.ret == -EINPROGRESS) {
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
     assert(s->nr_queues > 1);
     req = nvme_get_free_req(ioq);
     assert(req);
-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

     data.co = qemu_coroutine_self();
     if (data.ret == -EINPROGRESS) {
@@ -XXX,XX +XXX,XX @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     req = nvme_get_free_req(ioq);
     assert(req);

-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

     data.co = qemu_coroutine_self();
     while (data.ret == -EINPROGRESS) {
@@ -XXX,XX +XXX,XX @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     qemu_co_mutex_unlock(&s->dma_map_lock);

     if (ret) {
-        nvme_put_free_req_and_wake(s, ioq, req);
+        nvme_put_free_req_and_wake(ioq, req);
         goto out;
     }

     trace_nvme_dsm(s, offset, bytes);

-    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+    nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);

     data.co = qemu_coroutine_self();
     while (data.ret == -EINPROGRESS) {
@@ -XXX,XX +XXX,XX @@ static void nvme_aio_unplug(BlockDriverState *bs)
     for (i = 1; i < s->nr_queues; i++) {
         NVMeQueuePair *q = s->queues[i];
         qemu_mutex_lock(&q->lock);
-        nvme_kick(s, q);
-        nvme_process_completion(s, q);
+        nvme_kick(q);
+        nvme_process_completion(q);
         qemu_mutex_unlock(&q->lock);
     }
 }
-- 
2.26.2

----------------------------------------------------------------
block/nvme: support nested aio_poll()

QEMU block drivers are supposed to support aio_poll() from I/O
completion callback functions. This means completion processing must be
re-entrant.

The standard approach is to schedule a BH during completion processing
and cancel it at the end of processing. If aio_poll() is invoked by a
callback function then the BH will run. The BH continues the suspended
completion processing.

All of this means that request A's cb() can synchronously wait for
request B to complete. Previously the nvme block driver would hang
because it didn't process completions from nested aio_poll().
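
A condensed sketch of this pattern, using real QEMU BH calls but an
invented process_one_completion() helper and no locking (illustration
only, not the patch itself):

    static bool process_one_completion(NVMeQueuePair *q);   /* invented */

    static void example_process_completions(NVMeQueuePair *q)
    {
        /*
         * Publish the pending work first: if a cb() below calls
         * aio_poll(), the event loop runs q->completion_bh and resumes
         * completion processing there.
         */
        qemu_bh_schedule(q->completion_bh);

        while (process_one_completion(q)) {
            /* request cb() runs here and may nest aio_poll() */
        }

        /* Only reached on normal exit: the nested path wasn't needed. */
        qemu_bh_cancel(q->completion_bh);
    }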

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Sergio Lopez <slp@redhat.com>
Message-id: 20200617132201.1832152-8-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/nvme.c       | 67 ++++++++++++++++++++++++++++++++++++++++------
 block/trace-events |  2 +-
 2 files changed, 60 insertions(+), 9 deletions(-)

diff --git a/block/nvme.c b/block/nvme.c
index XXXXXXX..XXXXXXX 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
     int cq_phase;
     int free_req_head;
     NVMeRequest reqs[NVME_NUM_REQS];
-    bool busy;
     int need_kick;
     int inflight;
+
+    /* Thread-safe, no lock necessary */
+    QEMUBH *completion_bh;
 } NVMeQueuePair;

 /* Memory mapped registers */
@@ -XXX,XX +XXX,XX @@ struct BDRVNVMeState {
 #define NVME_BLOCK_OPT_DEVICE "device"
 #define NVME_BLOCK_OPT_NAMESPACE "namespace"

+static void nvme_process_completion_bh(void *opaque);
+
 static QemuOptsList runtime_opts = {
     .name = "nvme",
     .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
@@ -XXX,XX +XXX,XX @@ static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,

 static void nvme_free_queue_pair(NVMeQueuePair *q)
 {
+    if (q->completion_bh) {
+        qemu_bh_delete(q->completion_bh);
+    }
     qemu_vfree(q->prp_list_pages);
     qemu_vfree(q->sq.queue);
     qemu_vfree(q->cq.queue);
@@ -XXX,XX +XXX,XX @@ static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
     q->index = idx;
     qemu_co_queue_init(&q->free_req_queue);
     q->prp_list_pages = qemu_blockalign0(bs, s->page_size * NVME_NUM_REQS);
+    q->completion_bh = aio_bh_new(bdrv_get_aio_context(bs),
+                                  nvme_process_completion_bh, q);
     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                           s->page_size * NVME_NUM_REQS,
                           false, &prp_list_iova);
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(NVMeQueuePair *q)
     NvmeCqe *c;

     trace_nvme_process_completion(s, q->index, q->inflight);
-    if (q->busy || s->plugged) {
-        trace_nvme_process_completion_queue_busy(s, q->index);
+    if (s->plugged) {
+        trace_nvme_process_completion_queue_plugged(s, q->index);
         return false;
     }
-    q->busy = true;
+
+    /*
+     * Support re-entrancy when a request cb() function invokes aio_poll().
+     * Pending completions must be visible to aio_poll() so that a cb()
+     * function can wait for the completion of another request.
+     *
+     * The aio_poll() loop will execute our BH and we'll resume completion
+     * processing there.
+     */
+    qemu_bh_schedule(q->completion_bh);
+
     assert(q->inflight >= 0);
     while (q->inflight) {
         int ret;
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(NVMeQueuePair *q)
         assert(req.cb);
         nvme_put_free_req_locked(q, preq);
         preq->cb = preq->opaque = NULL;
-        qemu_mutex_unlock(&q->lock);
-        req.cb(req.opaque, ret);
-        qemu_mutex_lock(&q->lock);
         q->inflight--;
+        qemu_mutex_unlock(&q->lock);
+        req.cb(req.opaque, ret);
+        qemu_mutex_lock(&q->lock);
         progress = true;
     }
     if (progress) {
@@ -XXX,XX +XXX,XX @@ static bool nvme_process_completion(NVMeQueuePair *q)
         *q->cq.doorbell = cpu_to_le32(q->cq.head);
         nvme_wake_free_req_locked(q);
     }
-    q->busy = false;
+
+    qemu_bh_cancel(q->completion_bh);
+
     return progress;
 }

+static void nvme_process_completion_bh(void *opaque)
+{
+    NVMeQueuePair *q = opaque;
+
+    /*
+     * We're being invoked because a nvme_process_completion() cb() function
+     * called aio_poll(). The callback may be waiting for further completions
+     * so notify the device that it has space to fill in more completions now.
+     */
+    smp_mb_release();
+    *q->cq.doorbell = cpu_to_le32(q->cq.head);
+    nvme_wake_free_req_locked(q);
+
+    nvme_process_completion(q);
+}
+
 static void nvme_trace_command(const NvmeCmd *cmd)
 {
     int i;
@@ -XXX,XX +XXX,XX @@ static void nvme_detach_aio_context(BlockDriverState *bs)
 {
     BDRVNVMeState *s = bs->opaque;

+    for (int i = 0; i < s->nr_queues; i++) {
+        NVMeQueuePair *q = s->queues[i];
+
+        qemu_bh_delete(q->completion_bh);
+        q->completion_bh = NULL;
+    }
+
     aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
                            false, NULL, NULL);
 }
@@ -XXX,XX +XXX,XX @@ static void nvme_attach_aio_context(BlockDriverState *bs,
     s->aio_context = new_context;
     aio_set_event_notifier(new_context, &s->irq_notifier,
                            false, nvme_handle_event, nvme_poll_cb);
+
+    for (int i = 0; i < s->nr_queues; i++) {
+        NVMeQueuePair *q = s->queues[i];
+
+        q->completion_bh =
+            aio_bh_new(new_context, nvme_process_completion_bh, q);
+    }
 }

 static void nvme_aio_plug(BlockDriverState *bs)
diff --git a/block/trace-events b/block/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -XXX,XX +XXX,XX @@ nvme_kick(void *s, int queue) "s %p queue %d"
 nvme_dma_flush_queue_wait(void *s) "s %p"
 nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
 nvme_process_completion(void *s, int index, int inflight) "s %p queue %d inflight %d"
-nvme_process_completion_queue_busy(void *s, int index) "s %p queue %d"
+nvme_process_completion_queue_plugged(void *s, int index) "s %p queue %d"
 nvme_complete_command(void *s, int index, int cid) "s %p queue %d cid %d"
 nvme_submit_command(void *s, int index, int cid) "s %p queue %d cid %d"
 nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
-- 
2.26.2