Pretty small still, but there are two patches that ought
to get backported to stable, so no point in delaying.

r~

The following changes since commit a5ba0a7e4e150d1350a041f0d0ef9ca6c8d7c307:

  Merge tag 'pull-aspeed-20241211' of https://github.com/legoater/qemu into staging (2024-12-11 15:16:47 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241212

for you to fetch changes up to 7ac87b14a92234b6a89b701b4043ad6cf8bdcccf:

  target/sparc: Use memcpy() and remove memcpy32() (2024-12-12 14:28:38 -0600)

----------------------------------------------------------------
tcg: Reset free_temps before tcg_optimize
tcg/riscv: Fix StoreStore barrier generation
include/exec: Introduce fpst alias in helper-head.h.inc
target/sparc: Use memcpy() and remove memcpy32()

----------------------------------------------------------------
Philippe Mathieu-Daudé (1):
      target/sparc: Use memcpy() and remove memcpy32()

Richard Henderson (2):
      tcg: Reset free_temps before tcg_optimize
      include/exec: Introduce fpst alias in helper-head.h.inc

Roman Artemev (1):
      tcg/riscv: Fix StoreStore barrier generation

 include/tcg/tcg-temp-internal.h |  6 ++++++
 accel/tcg/plugin-gen.c          |  2 +-
 target/sparc/win_helper.c       | 26 ++++++++------------------
 tcg/tcg.c                       |  5 ++++-
 include/exec/helper-head.h.inc  |  3 +++
 tcg/riscv/tcg-target.c.inc      |  2 +-
 6 files changed, 23 insertions(+), 21 deletions(-)

When allocating new temps during tcg_optimize, do not re-use
any EBB temps that were used within the TB. We do not know
over what span of the TB the temp was live.

Introduce tcg_temp_ebb_reset_freed and use before tcg_optimize,
as well as replacing the equivalent in plugin_gen_inject and
tcg_func_start.
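A rough sketch of the intended call pattern (illustrative only; the
wrapper name below is hypothetical, only tcg_temp_ebb_reset_freed()
and tcg_optimize() come from this patch):

    /* Forget every temp freed while translating the TB before the
     * optimizer runs, so any temp it allocates is guaranteed fresh
     * rather than a recycled EBB temp with an unknown live range. */
    static void example_optimize_tb(TCGContext *s)
    {
        tcg_temp_ebb_reset_freed(s);    /* clears s->free_temps */
        tcg_optimize(s);                /* may now allocate new temps safely */
    }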
Cc: qemu-stable@nongnu.org
Fixes: fb04ab7ddd8 ("tcg/optimize: Lower TCG_COND_TST{EQ,NE} if unsupported")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2711
Reported-by: wannacu <wannacu2049@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 include/tcg/tcg-temp-internal.h | 6 ++++++
 accel/tcg/plugin-gen.c          | 2 +-
 tcg/tcg.c                       | 5 ++++-
 3 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/include/tcg/tcg-temp-internal.h b/include/tcg/tcg-temp-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-temp-internal.h
+++ b/include/tcg/tcg-temp-internal.h
@@ -XXX,XX +XXX,XX @@ TCGv_i64 tcg_temp_ebb_new_i64(void);
 TCGv_ptr tcg_temp_ebb_new_ptr(void);
 TCGv_i128 tcg_temp_ebb_new_i128(void);
 
+/* Forget all freed EBB temps, so that new allocations produce new temps. */
+static inline void tcg_temp_ebb_reset_freed(TCGContext *s)
+{
+    memset(s->free_temps, 0, sizeof(s->free_temps));
+}
+
 #endif /* TCG_TEMP_FREE_H */
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
      * that might be live within the existing opcode stream.
      * The simplest solution is to release them all and create new.
      */
-    memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));
+    tcg_temp_ebb_reset_freed(tcg_ctx);
 
     QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
         switch (op->opc) {
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
     s->nb_temps = s->nb_globals;
 
     /* No temps have been previously allocated for size or locality. */
-    memset(s->free_temps, 0, sizeof(s->free_temps));
+    tcg_temp_ebb_reset_freed(s);
 
     /* No constant temps have been previously allocated. */
     for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
     }
 #endif
 
+    /* Do not reuse any EBB that may be allocated within the TB. */
+    tcg_temp_ebb_reset_freed(s);
+
     tcg_optimize(s);
 
     reachable_code_pass(s);
-- 
2.43.0

From: Roman Artemev <roman.artemev@syntacore.com>

On RISC-V the StoreStore barrier corresponds to
`fence w, w`, not `fence r, r`.
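For reference, these masks follow the FENCE encoding in the RISC-V
unprivileged spec: the predecessor set occupies bits 27:24 (PI, PO,
PR, PW) and the successor set bits 23:20 (SI, SO, SR, SW). A minimal
sketch of how the two constants decode (the macro names below are
illustrative, not taken from the QEMU source):

    /* FENCE operand bits, RISC-V unprivileged ISA:
     *   bit 25 = PR (predecessor read)   bit 24 = PW (predecessor write)
     *   bit 21 = SR (successor read)     bit 20 = SW (successor write)
     */
    #define FENCE_PR  (1u << 25)
    #define FENCE_PW  (1u << 24)
    #define FENCE_SR  (1u << 21)
    #define FENCE_SW  (1u << 20)

    /* 0x02200000 == FENCE_PR | FENCE_SR  ->  "fence r, r" (wrong barrier) */
    /* 0x01100000 == FENCE_PW | FENCE_SW  ->  "fence w, w" (StoreStore)    */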
Cc: qemu-stable@nongnu.org
Fixes: efbea94c76b ("tcg/riscv: Add slowpath load and store instructions")
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Denis Tomashev <denis.tomashev@syntacore.com>
Signed-off-by: Roman Artemev <roman.artemev@syntacore.com>
Message-ID: <e2f2131e294a49e79959d4fa9ec02cf4@syntacore.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
         insn |= 0x02100000;
     }
     if (a0 & TCG_MO_ST_ST) {
-        insn |= 0x02200000;
+        insn |= 0x01100000;
     }
     tcg_out32(s, insn);
 }
-- 
2.43.0

This allows targets to declare that the helper requires a
float_status pointer instead of a generic void pointer.
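As an illustration of the new alias (the helper name below is
hypothetical, not part of this patch), a target could now declare a
float helper in its helper.h as:

    /* "fpst" aliases to ptr for the call ABI, but expands to
     * float_status * in the C prototype instead of void *. */
    DEF_HELPER_FLAGS_3(example_addf, TCG_CALL_NO_RWG, f32, f32, f32, fpst)

    /* Corresponding C prototype expected by the generated glue: */
    float32 helper_example_addf(float32 a, float32 b, float_status *status);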
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/helper-head.h.inc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/exec/helper-head.h.inc b/include/exec/helper-head.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/helper-head.h.inc
+++ b/include/exec/helper-head.h.inc
@@ -XXX,XX +XXX,XX @@
 #define dh_alias_ptr ptr
 #define dh_alias_cptr ptr
 #define dh_alias_env ptr
+#define dh_alias_fpst ptr
 #define dh_alias_void void
 #define dh_alias_noreturn noreturn
 #define dh_alias(t) glue(dh_alias_, t)
@@ -XXX,XX +XXX,XX @@
 #define dh_ctype_ptr void *
 #define dh_ctype_cptr const void *
 #define dh_ctype_env CPUArchState *
+#define dh_ctype_fpst float_status *
 #define dh_ctype_void void
 #define dh_ctype_noreturn G_NORETURN void
 #define dh_ctype(t) dh_ctype_##t
@@ -XXX,XX +XXX,XX @@
 #define dh_typecode_f64 dh_typecode_i64
 #define dh_typecode_cptr dh_typecode_ptr
 #define dh_typecode_env dh_typecode_ptr
+#define dh_typecode_fpst dh_typecode_ptr
 #define dh_typecode(t) dh_typecode_##t
 
 #define dh_callflag_i32 0
-- 
2.43.0

From: Philippe Mathieu-Daudé <philmd@linaro.org>

Rather than manually copying each register, use
the libc memcpy(), which is well optimized nowadays.
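A minimal standalone sketch of the idea (the struct below is a
hypothetical stand-in for the eight SPARC global registers; only the
"one memcpy() instead of eight assignments" pattern comes from the
patch):

    #include <string.h>

    struct regs_example { unsigned long gregs[8]; };

    static void copy_globals(struct regs_example *dst,
                             const struct regs_example *src)
    {
        /* sizeof(dst->gregs) covers all eight registers, so one call
         * replaces the eight element-by-element assignments. */
        memcpy(dst->gregs, src->gregs, sizeof(dst->gregs));
    }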
Suggested-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-ID: <20241205205418.67613-1-philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/win_helper.c | 26 ++++++++------------------
 1 file changed, 8 insertions(+), 18 deletions(-)

diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/win_helper.c
+++ b/target/sparc/win_helper.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/helper-proto.h"
 #include "trace.h"
 
-static inline void memcpy32(target_ulong *dst, const target_ulong *src)
-{
-    dst[0] = src[0];
-    dst[1] = src[1];
-    dst[2] = src[2];
-    dst[3] = src[3];
-    dst[4] = src[4];
-    dst[5] = src[5];
-    dst[6] = src[6];
-    dst[7] = src[7];
-}
-
 void cpu_set_cwp(CPUSPARCState *env, int new_cwp)
 {
     /* put the modified wrap registers at their proper location */
     if (env->cwp == env->nwindows - 1) {
-        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
+        memcpy(env->regbase, env->regbase + env->nwindows * 16,
+               sizeof(env->gregs));
     }
     env->cwp = new_cwp;
 
     /* put the wrap registers at their temporary location */
     if (new_cwp == env->nwindows - 1) {
-        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
+        memcpy(env->regbase + env->nwindows * 16, env->regbase,
+               sizeof(env->gregs));
     }
     env->regwptr = env->regbase + (new_cwp * 16);
 }
@@ -XXX,XX +XXX,XX @@ void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl)
     dst = get_gl_gregset(env, env->gl);
 
     if (src != dst) {
-        memcpy32(dst, env->gregs);
-        memcpy32(env->gregs, src);
+        memcpy(dst, env->gregs, sizeof(env->gregs));
+        memcpy(env->gregs, src, sizeof(env->gregs));
     }
 }
 
@@ -XXX,XX +XXX,XX @@ void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
         /* Switch global register bank */
         src = get_gregset(env, new_pstate_regs);
         dst = get_gregset(env, pstate_regs);
-        memcpy32(dst, env->gregs);
-        memcpy32(env->gregs, src);
+        memcpy(dst, env->gregs, sizeof(env->gregs));
+        memcpy(env->gregs, src, sizeof(env->gregs));
     } else {
         trace_win_helper_no_switch_pstate(new_pstate_regs);
     }
-- 
2.43.0