The following changes since commit e18e5501d8ac692d32657a3e1ef545b14e72b730:

  Merge remote-tracking branch 'remotes/dgilbert-gitlab/tags/pull-virtiofs-20200210' into staging (2020-02-10 18:09:14 +0000)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20200212

for you to fetch changes up to 2445971604c1cfd3ec484457159f4ac300fb04d2:

  tcg: Add tcg_gen_gvec_5_ptr (2020-02-12 14:58:36 -0800)

----------------------------------------------------------------
Fix breakpoint invalidation.
Add support for tcg helpers with 7 arguments.
Add support for gvec helpers with 5 arguments.

----------------------------------------------------------------
Max Filippov (1):
      exec: flush CPU TB cache in breakpoint_invalidate

Richard Henderson (1):
      tcg: Add tcg_gen_gvec_5_ptr

Taylor Simpson (1):
      tcg: Add support for a helper with 7 arguments

 include/exec/helper-gen.h   | 13 +++++++++++++
 include/exec/helper-head.h  |  2 ++
 include/exec/helper-proto.h |  6 ++++++
 include/exec/helper-tcg.h   |  7 +++++++
 include/tcg/tcg-op-gvec.h   |  7 +++++++
 exec.c                      | 15 +++++++--------
 tcg/tcg-op-gvec.c           | 32 ++++++++++++++++++++++++++++++++
 7 files changed, 74 insertions(+), 8 deletions(-)
Deleted patch: tcg: Fix constant folding of INDEX_op_extract2_i32

On a 64-bit host, discard any replications of the 32-bit
sign bit when performing the shift and merge.

Fixes: https://bugs.launchpad.net/bugs/1834496
Tested-by: Christophe Lyon <christophe.lyon@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
            if (opc == INDEX_op_extract2_i64) {
                tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
            } else {
-               tmp = (v1 >> op->args[3]) | (v2 << (32 - op->args[3]));
-               tmp = (int32_t)tmp;
+               tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
+                               ((uint32_t)v2 << (32 - op->args[3])));
            }
            tcg_opt_gen_movi(s, op, op->args[0], tmp);
            break;
--
2.17.1
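As a standalone illustration of the failure mode (illustrative only, not part of the patch): on a 64-bit host the optimizer keeps 32-bit constants sign-extended in 64-bit variables, so shifting the untruncated value drags copies of bit 31 into positions that should come only from the second operand. The snippet assumes nothing beyond standard C; the real fix additionally sign-extends the 32-bit result before storing it.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 32-bit constants as the optimizer holds them on a 64-bit host:
       sign-extended to 64 bits. */
    uint64_t v1 = 0xffffffff80000000ull;   /* (int32_t)0x80000000 */
    uint64_t v2 = 1;
    int pos = 8;

    /* Old folding: replicated sign bits of v1 leak into bits 31..24. */
    uint32_t bad  = (v1 >> pos) | (v2 << (32 - pos));

    /* Fixed folding: truncate to 32 bits before shifting and merging. */
    uint32_t good = ((uint32_t)v1 >> pos) | ((uint32_t)v2 << (32 - pos));

    /* Prints bad=0xff800000 good=0x01800000; the correct extract2_i32
       result for (v2:v1) at pos 8 is 0x01800000. */
    printf("bad=0x%08x good=0x%08x\n", bad, good);
    return 0;
}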
Deleted patch: tcg/aarch64: Fix output of extract2 opcodes

This patch fixes two problems:
(1) The inputs to the EXTR insn were reversed,
(2) The input constraints use rZ, which means that we need to use
    the REG0 macro in order to supply XZR for a constant 0 input.

Fixes: 464c2969d5d
Reported-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.inc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,

     case INDEX_op_extract2_i64:
     case INDEX_op_extract2_i32:
-        tcg_out_extr(s, ext, a0, a1, a2, args[3]);
+        tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
         break;

     case INDEX_op_add2_i32:
--
2.17.1
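For context (illustrative only, not part of the patch): TCG's extract2 dest, a1, a2, pos reads a word from the concatenation a2:a1 starting at bit pos, while AArch64's EXTR Rd, Rn, Rm, #lsb computes (Rn:Rm) >> lsb, so Rn must carry the high input and Rm the low one. The small program below (64-bit case; it ignores the separate rZ/XZR constraint issue handled by REG0) shows that swapping the operands changes the result whenever the two inputs differ.

#include <stdint.h>
#include <stdio.h>

/* EXTR Rd, Rn, Rm, #lsb: a 64-bit extract from the concatenation Rn:Rm. */
static uint64_t extr(uint64_t rn, uint64_t rm, unsigned lsb)
{
    /* lsb == 0 would make the second shift undefined, so special-case it. */
    return lsb ? (rm >> lsb) | (rn << (64 - lsb)) : rm;
}

int main(void)
{
    uint64_t lo = 0x1111111111111111ull;   /* TCG operand a1 */
    uint64_t hi = 0x2222222222222222ull;   /* TCG operand a2 */
    unsigned pos = 16;

    printf("correct  (hi as Rn): 0x%016llx\n",
           (unsigned long long)extr(hi, lo, pos));   /* 0x2222111111111111 */
    printf("reversed (lo as Rn): 0x%016llx\n",
           (unsigned long long)extr(lo, hi, pos));   /* 0x1111222222222222 */
    return 0;
}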
Deleted patch: include/qemu/atomic.h: Add signal_barrier

We have some potential race conditions vs our user-exec signal
handler that will be solved with this barrier.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/atomic.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -XXX,XX +XXX,XX @@
 #define smp_read_barrier_depends()   barrier()
 #endif

+/*
+ * A signal barrier forces all pending local memory ops to be observed before
+ * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
+ * the same as barrier(), but since we have the correct builtin, use it.
+ */
+#define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)
+
 /* Sanity check that the size of an atomic operation isn't "overly large".
  * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
  * want to use them because we ought not need them, and this lets us do a
@@ -XXX,XX +XXX,XX @@
 #define smp_read_barrier_depends()   barrier()
 #endif

+#ifndef signal_barrier
+#define signal_barrier()   barrier()
+#endif
+
 /* These will only be atomic if the processor does the fetch or store
  * in a single issue memory operation
  */
--
2.17.1
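A minimal sketch of the intended use, mirroring the set_helper_retaddr()/clear_helper_retaddr() pattern referenced elsewhere on this page (the snippet is illustrative, not a verbatim QEMU excerpt): the barrier keeps the store to the thread-local variable ordered before any following access that might fault, so a SIGSEGV handler running on the same thread sees the value.

#include <stdint.h>

#define signal_barrier()  __atomic_signal_fence(__ATOMIC_SEQ_CST)

static __thread uintptr_t helper_retaddr;

static inline void set_helper_retaddr(uintptr_t ra)
{
    helper_retaddr = ra;
    /* Make the store visible before any subsequent access that may fault,
       so the signal handler sees the correct unwind information. */
    signal_barrier();
}

static inline void clear_helper_retaddr(void)
{
    /* Let the guarded memory operations complete before dropping the state. */
    signal_barrier();
    helper_retaddr = 0;
}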
From: Max Filippov <jcmvbkbc@gmail.com>
Subject: exec: flush CPU TB cache in breakpoint_invalidate

When a breakpoint is inserted at a location for which there is currently
no virtual-to-physical translation, no action is taken on the CPU TB
cache. If a TB for that virtual address already exists but is not
currently visible, the breakpoint won't be hit the next time an
instruction at that address is executed.

Flush the entire CPU TB cache in breakpoint_invalidate to force
re-translation of all TBs for the breakpoint address.

This change fixes the following scenario:
- a linux-user application is running
- a breakpoint is inserted from the QEMU gdbstub for a user address that
  is not currently present in the target CPU TLB
- an instruction at that address is executed, but the external debugger
  doesn't get control.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Message-Id: <20191127220602.10827-2-jcmvbkbc@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 exec.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/exec.c b/exec.c
index XXXXXXX..XXXXXXX 100644
--- a/exec.c
+++ b/exec.c
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)

 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 {
-    MemTxAttrs attrs;
-    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
-    int asidx = cpu_asidx_from_attrs(cpu, attrs);
-    if (phys != -1) {
-        /* Locks grabbed by tb_invalidate_phys_addr */
-        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
-                                phys | (pc & ~TARGET_PAGE_MASK), attrs);
-    }
+    /*
+     * There may not be a virtual to physical translation for the pc
+     * right now, but there may exist cached TB for this pc.
+     * Flush the whole TB cache to force re-translation of such TBs.
+     * This is heavyweight, but we're debugging anyway.
+     */
+    tb_flush(cpu);
 }
 #endif

--
2.20.1
From: Taylor Simpson <tsimpson@quicinc.com>
Subject: tcg: Add support for a helper with 7 arguments

Currently, helpers can only take up to 6 arguments. This patch adds the
capability for up to 7 arguments. I have tested it with the Hexagon port
that I am preparing for submission.

Signed-off-by: Taylor Simpson <tsimpson@quicinc.com>
Message-Id: <1580942510-2820-1-git-send-email-tsimpson@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/helper-gen.h   | 13 +++++++++++++
 include/exec/helper-head.h  |  2 ++
 include/exec/helper-proto.h |  6 ++++++
 include/exec/helper-tcg.h   |  7 +++++++
 4 files changed, 28 insertions(+)

diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/helper-gen.h
+++ b/include/exec/helper-gen.h
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
     tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
 }

+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+    dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+    dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
+    dh_arg_decl(t7, 7)) \
+{ \
+    TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+                         dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
+                         dh_arg(t7, 7) }; \
+    tcg_gen_callN(HELPER(name), dh_retvar(ret), 7, args); \
+}
+
 #include "helper.h"
 #include "trace/generated-helpers.h"
 #include "trace/generated-helpers-wrappers.h"
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
 #undef DEF_HELPER_FLAGS_4
 #undef DEF_HELPER_FLAGS_5
 #undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7
 #undef GEN_HELPER

 #endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/helper-head.h
+++ b/include/exec/helper-head.h
@@ -XXX,XX +XXX,XX @@
     DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
 #define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \
     DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6)
+#define DEF_HELPER_7(name, ret, t1, t2, t3, t4, t5, t6, t7) \
+    DEF_HELPER_FLAGS_7(name, 0, ret, t1, t2, t3, t4, t5, t6, t7)

 /* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */

diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/helper-proto.h
+++ b/include/exec/helper-proto.h
@@ -XXX,XX +XXX,XX @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
 dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
                             dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));

+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+                            dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
+                            dh_ctype(t7));
+
 #include "helper.h"
 #include "trace/generated-helpers.h"
 #include "tcg-runtime.h"
@@ -XXX,XX +XXX,XX @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
 #undef DEF_HELPER_FLAGS_4
 #undef DEF_HELPER_FLAGS_5
 #undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7

 #endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/helper-tcg.h
+++ b/include/exec/helper-tcg.h
@@ -XXX,XX +XXX,XX @@
     | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
     | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) },

+#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
+  { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
+    .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
+    | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
+    | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) | dh_sizemask(t7, 7) },
+
 #include "helper.h"
 #include "trace/generated-helpers.h"
 #include "tcg-runtime.h"
@@ -XXX,XX +XXX,XX @@
 #undef DEF_HELPER_FLAGS_4
 #undef DEF_HELPER_FLAGS_5
 #undef DEF_HELPER_FLAGS_6
+#undef DEF_HELPER_FLAGS_7

 #endif /* HELPER_TCG_H */
--
2.20.1
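A usage sketch (the helper name, argument choice, and target code below are hypothetical, not part of the patch): with the macros above, a target declares a seven-argument helper once in its helper.h, and the C prototype, the gen_helper_*() emitter, and the runtime call descriptor all come out of the expansion.

/* helper.h of a hypothetical target */
DEF_HELPER_7(vec_op, void, env, ptr, ptr, ptr, ptr, i32, i32)

/* op_helper.c: the C implementation matching the declaration above */
void HELPER(vec_op)(CPUArchState *env, void *d, void *a, void *b, void *c,
                    uint32_t sel, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);

    /* Toy body: select between b and c, add to a, store to d. */
    for (i = 0; i < oprsz; i += 8) {
        uint64_t x = *(uint64_t *)(a + i);
        uint64_t y = *(uint64_t *)((sel & 1 ? b : c) + i);
        *(uint64_t *)(d + i) = x + y;
    }
}

/* translate.c: the generated gen_helper_vec_op() takes the same seven
   operands as TCG temporaries. */
static void gen_vec_op(TCGv_ptr d, TCGv_ptr a, TCGv_ptr b, TCGv_ptr c,
                       TCGv_i32 sel, TCGv_i32 desc)
{
    gen_helper_vec_op(cpu_env, d, a, b, c, sel, desc);
}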
Deleted patch: tcg: Remove cpu_ld*_code_ra

These functions are not used, and are not usable in the
context of code generation, because we never have a helper
return address to pass in to them.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_useronly_template.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
     return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
 }

+#ifndef CODE_ACCESS
 static inline RES_TYPE
 glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   abi_ptr ptr,
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     clear_helper_retaddr();
     return ret;
 }
+#endif

 #if DATA_SIZE <= 2
 static inline int
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
     return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
 }

+#ifndef CODE_ACCESS
 static inline int
 glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   abi_ptr ptr,
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     clear_helper_retaddr();
     return ret;
 }
-#endif
+#endif /* CODE_ACCESS */
+#endif /* DATA_SIZE <= 2 */

 #ifndef CODE_ACCESS
 static inline void
--
2.17.1
Subject: tcg: Add tcg_gen_gvec_5_ptr

Extend the vector generator infrastructure to handle
5 vector arguments.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-op-gvec.h |  7 +++++++
 tcg/tcg-op-gvec.c         | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)

diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-op-gvec.h
+++ b/include/tcg/tcg-op-gvec.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t maxsz, int32_t data,
                         gen_helper_gvec_4_ptr *fn);

+typedef void gen_helper_gvec_5_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+                                   TCGv_ptr, TCGv_ptr, TCGv_i32);
+void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                        uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
+                        uint32_t oprsz, uint32_t maxsz, int32_t data,
+                        gen_helper_gvec_5_ptr *fn);
+
 /* Expand a gvec operation.  Either inline or out-of-line depending on
    the actual vector size and the operations supported by the host. */
 typedef struct {
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
     tcg_temp_free_i32(desc);
 }

+/* Generate a call to a gvec-style helper with five vector operands
+   and an extra pointer operand. */
+void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
+                        uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
+                        uint32_t oprsz, uint32_t maxsz, int32_t data,
+                        gen_helper_gvec_5_ptr *fn)
+{
+    TCGv_ptr a0, a1, a2, a3, a4;
+    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));
+
+    a0 = tcg_temp_new_ptr();
+    a1 = tcg_temp_new_ptr();
+    a2 = tcg_temp_new_ptr();
+    a3 = tcg_temp_new_ptr();
+    a4 = tcg_temp_new_ptr();
+
+    tcg_gen_addi_ptr(a0, cpu_env, dofs);
+    tcg_gen_addi_ptr(a1, cpu_env, aofs);
+    tcg_gen_addi_ptr(a2, cpu_env, bofs);
+    tcg_gen_addi_ptr(a3, cpu_env, cofs);
+    tcg_gen_addi_ptr(a4, cpu_env, eofs);
+
+    fn(a0, a1, a2, a3, a4, ptr, desc);
+
+    tcg_temp_free_ptr(a0);
+    tcg_temp_free_ptr(a1);
+    tcg_temp_free_ptr(a2);
+    tcg_temp_free_ptr(a3);
+    tcg_temp_free_ptr(a4);
+    tcg_temp_free_i32(desc);
+}
+
 /* Return true if we want to implement something of OPRSZ bytes
    in units of LNSZ.  This limits the expansion of inline code. */
 static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
--
2.20.1
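A usage sketch (hypothetical helper name; the Arm-style vec_full_reg_offset()/pred_full_reg_offset() helpers are used only for concreteness, and any target-specific way of producing CPUState offsets works): a gvec_5_ptr expansion relies on the seven-argument helper support added earlier in this series, since the out-of-line helper receives five vector pointers, the extra pointer, and the descriptor, i.e. seven arguments in total.

/* helper.h: out-of-line helper with five vector pointers, one extra pointer
   (here cpu_env), and the simd descriptor. */
DEF_HELPER_FLAGS_7(gvec_fma_sel, TCG_CALL_NO_RWG,
                   void, ptr, ptr, ptr, ptr, ptr, ptr, i32)

/* translate.c: expand over vector registers stored inside CPUState, passing
   cpu_env as the extra pointer so the helper can reach FP status flags. */
static void gen_fma_sel(DisasContext *s, int rd, int rn, int rm, int ra)
{
    tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       pred_full_reg_offset(s, 0),  /* e.g. a governing predicate */
                       cpu_env,
                       vec_full_reg_size(s), vec_full_reg_size(s),
                       0, gen_helper_gvec_fma_sel);
}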