The following changes since commit 67e41fe0cfb62e6cdfa659f0155417d17e5274ea:

  Merge tag 'pull-ppc-20220104' of https://github.com/legoater/qemu into staging (2022-01-04 07:23:27 -0800)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220104

for you to fetch changes up to d7478d4229f0a2b2817a55487e6b17081099fae4:

  common-user: Fix tail calls to safe_syscall_set_errno_tail (2022-01-04 15:41:03 -0800)

----------------------------------------------------------------
Fix for safe_syscall_base.
Fix for folding of vector add/sub.
Fix build on loongarch64 with gcc 8.
Remove decl for qemu_run_machine_init_done_notifiers.

----------------------------------------------------------------
Philippe Mathieu-Daudé (1):
      linux-user: Fix trivial build error on loongarch64 hosts

Richard Henderson (2):
      tcg/optimize: Fix folding of vector ops
      common-user: Fix tail calls to safe_syscall_set_errno_tail

Xiaoyao Li (1):
      sysemu: Cleanup qemu_run_machine_init_done_notifiers()

 include/sysemu/sysemu.h                    |  1 -
 linux-user/host/loongarch64/host-signal.h  |  4 +--
 tcg/optimize.c                             | 49 +++++++++++++++++++++++-------
 common-user/host/i386/safe-syscall.inc.S   |  1 +
 common-user/host/mips/safe-syscall.inc.S   |  1 +
 common-user/host/x86_64/safe-syscall.inc.S |  1 +
 6 files changed, 42 insertions(+), 15 deletions(-)
Bitwise operations are easy to fold, because the operation is
identical regardless of element size. But add and sub need
extra element size info that is not currently propagated.

Fixes: 2f9f08ba43d
Cc: qemu-stable@nongnu.org
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/799
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     CASE_OP_32_64(mul):
         return x * y;

-    CASE_OP_32_64(and):
+    CASE_OP_32_64_VEC(and):
         return x & y;

-    CASE_OP_32_64(or):
+    CASE_OP_32_64_VEC(or):
         return x | y;

-    CASE_OP_32_64(xor):
+    CASE_OP_32_64_VEC(xor):
         return x ^ y;

     case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_rotl_i64:
         return rol64(x, y & 63);

-    CASE_OP_32_64(not):
+    CASE_OP_32_64_VEC(not):
         return ~x;

     CASE_OP_32_64(neg):
         return -x;

-    CASE_OP_32_64(andc):
+    CASE_OP_32_64_VEC(andc):
         return x & ~y;

-    CASE_OP_32_64(orc):
+    CASE_OP_32_64_VEC(orc):
         return x | ~y;

     CASE_OP_32_64(eqv):
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }

+static bool fold_commutative(OptContext *ctx, TCGOp *op)
+{
+    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+    return false;
+}
+
 static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 {
     swap_commutative(op->args[0], &op->args[1], &op->args[2]);
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
     return false;
 }

+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_add_vec(OptContext *ctx, TCGOp *op)
+{
+    if (fold_commutative(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0)) {
+        return true;
+    }
+    return false;
+}
+
 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
 {
     if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
     return false;
 }

-static bool fold_sub(OptContext *ctx, TCGOp *op)
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0) ||
+    if (fold_xx_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_sub_to_neg(ctx, op)) {
         return true;
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     return false;
 }

+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
+}
+
 static bool fold_sub2(OptContext *ctx, TCGOp *op)
 {
     return fold_addsub2(ctx, op, false);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
          * Sorted alphabetically by opcode as much as possible.
          */
         switch (opc) {
-        CASE_OP_32_64_VEC(add):
+        CASE_OP_32_64(add):
             done = fold_add(&ctx, op);
             break;
+        case INDEX_op_add_vec:
+            done = fold_add_vec(&ctx, op);
+            break;
         CASE_OP_32_64(add2):
             done = fold_add2(&ctx, op);
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(sextract):
             done = fold_sextract(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(sub):
+        CASE_OP_32_64(sub):
             done = fold_sub(&ctx, op);
             break;
+        case INDEX_op_sub_vec:
+            done = fold_sub_vec(&ctx, op);
+            break;
         CASE_OP_32_64(sub2):
             done = fold_sub2(&ctx, op);
             break;
--
2.25.1
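
To make the first sentence of the commit message concrete, here is a small stand-alone C sketch (an illustration only, not code from QEMU; the per-lane helpers are made up): bitwise operations never move bits between vector element lanes, so they can be constant-folded on the raw 64-bit value regardless of element size, while an addition's carry can cross a lane boundary, which is why the patch routes add/sub through fold_add_vec()/fold_sub_vec() instead of do_constant_folding().

    /* Illustration only: model a 64-bit value as a vector of two 32-bit lanes. */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t and_per_lane32(uint64_t a, uint64_t b)
    {
        uint32_t lo = (uint32_t)a & (uint32_t)b;
        uint32_t hi = (uint32_t)(a >> 32) & (uint32_t)(b >> 32);
        return ((uint64_t)hi << 32) | lo;
    }

    static uint64_t add_per_lane32(uint64_t a, uint64_t b)
    {
        uint32_t lo = (uint32_t)a + (uint32_t)b;
        uint32_t hi = (uint32_t)(a >> 32) + (uint32_t)(b >> 32);
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint64_t a = 0x00000001ffffffffULL;   /* lanes {0x1, 0xffffffff} */
        uint64_t b = 0x0000000000000001ULL;   /* lanes {0x0, 0x1} */

        /* Bitwise: element size is irrelevant, folding the raw values is safe. */
        assert((a & b) == and_per_lane32(a, b));

        /* Add: the low lane's carry leaks into the high lane of the scalar sum. */
        printf("64-bit add: 0x%016" PRIx64 "\n", a + b);                 /* 0x0000000200000000 */
        printf("2x32 add:   0x%016" PRIx64 "\n", add_per_lane32(a, b));  /* 0x0000000100000000 */
        assert((a + b) != add_per_lane32(a, b));
        return 0;
    }

The same reasoning applies to subtraction via borrows, hence the separate fold_sub_vec() path.
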
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

When building using GCC 8.3.0 on loongarch64 (Loongnix) we get:

  In file included from ../linux-user/signal.c:33:
  ../linux-user/host/loongarch64/host-signal.h: In function ‘host_signal_write’:
  ../linux-user/host/loongarch64/host-signal.h:57:9: error: a label can only be part of a statement and a declaration is not a statement
     uint32_t sel = (insn >> 15) & 0b11111111111;
     ^~~~~~~~

We don't use the 'sel' variable more than once, so drop it.

Meson output for the record:

  Host machine cpu family: loongarch64
  Host machine cpu: loongarch64
  C compiler for the host machine: cc (gcc 8.3.0 "cc (Loongnix 8.3.0-6.lnd.vec.27) 8.3.0")
  C linker for the host machine: cc ld.bfd 2.31.1-system

Fixes: ad812c3bd65 ("linux-user: Implement CPU-specific signal handler for loongarch64 hosts")
Reported-by: Song Gao <gaosong@loongson.cn>
Suggested-by: Song Gao <gaosong@loongson.cn>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220104215027.2180972-1-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/host/loongarch64/host-signal.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/linux-user/host/loongarch64/host-signal.h b/linux-user/host/loongarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/host/loongarch64/host-signal.h
+++ b/linux-user/host/loongarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
         }
         break;
     case 0b001110: /* indexed, atomic, bounds-checking memory operations */
-        uint32_t sel = (insn >> 15) & 0b11111111111;
-
-        switch (sel) {
+        switch ((insn >> 15) & 0b11111111111) {
         case 0b00000100000: /* stx.b */
         case 0b00000101000: /* stx.h */
         case 0b00000110000: /* stx.w */
--
2.25.1
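
The GCC diagnostic quoted above comes from a general C rule: a label (including a case label) must be attached to a statement, and a declaration on its own is not a statement (prior to C23). A minimal, self-contained reproduction is sketched below; the function name and return values are hypothetical simplifications of host_signal_write(), while the opcode and selector values are the ones from the patch.

    /* Illustration only: why GCC 8 rejected the original host-signal.h. */
    #include <stdbool.h>
    #include <stdint.h>

    bool is_store(uint32_t insn)
    {
        switch (insn >> 26) {
        case 0b001110:
            /*
             * Writing "uint32_t sel = (insn >> 15) & 0b11111111111;" directly
             * here fails to compile: the declaration immediately follows the
             * case label, and a label may only label a statement.
             */

            /* Fix used by the patch: fold the expression into the switch. */
            switch ((insn >> 15) & 0b11111111111) {
            case 0b00000100000: /* stx.b */
                return true;
            default:
                return false;
            }

            /*
             * Alternative fix: open a braced block after the label so the
             * declaration sits inside a compound statement:
             *
             *     case 0b001110: {
             *         uint32_t sel = (insn >> 15) & 0b11111111111;
             *         ...
             *     }
             */
        default:
            return false;
        }
    }
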
From: Xiaoyao Li <xiaoyao.li@intel.com>

Remove qemu_run_machine_init_done_notifiers(), since it has no
implementation and no users.

Fixes: f66dc8737c9 ("vl: move all generic initialization out of vl.c")
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20220104024136.1433545-1-xiaoyao.li@intel.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/sysemu/sysemu.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -XXX,XX +XXX,XX @@ extern bool qemu_uuid_set;
 void qemu_add_exit_notifier(Notifier *notify);
 void qemu_remove_exit_notifier(Notifier *notify);

-void qemu_run_machine_init_done_notifiers(void);
 void qemu_add_machine_init_done_notifier(Notifier *notify);
 void qemu_remove_machine_init_done_notifier(Notifier *notify);

--
2.25.1
For the ABIs in which the syscall return register is not
also the first function argument register, move the errno
value into the correct place.

Fixes: a3310c0397e2 ("linux-user: Move syscall error detection into safe_syscall_base")
Reported-by: Laurent Vivier <laurent@vivier.eu>
Tested-by: Laurent Vivier <laurent@vivier.eu>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220104190454.542225-1-richard.henderson@linaro.org>
---
 common-user/host/i386/safe-syscall.inc.S   | 1 +
 common-user/host/mips/safe-syscall.inc.S   | 1 +
 common-user/host/x86_64/safe-syscall.inc.S | 1 +
 3 files changed, 3 insertions(+)

diff --git a/common-user/host/i386/safe-syscall.inc.S b/common-user/host/i386/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/i386/safe-syscall.inc.S
+++ b/common-user/host/i386/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
         pop %ebp
         .cfi_adjust_cfa_offset -4
         .cfi_restore ebp
+        mov %eax, (%esp)
         jmp safe_syscall_set_errno_tail

         .cfi_endproc
diff --git a/common-user/host/mips/safe-syscall.inc.S b/common-user/host/mips/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/mips/safe-syscall.inc.S
+++ b/common-user/host/mips/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
 1:      USE_ALT_CP(t0)
         SETUP_GPX(t1)
         SETUP_GPX64(t0, t1)
+        move   a0, v0
         PTR_LA t9, safe_syscall_set_errno_tail
         jr     t9

diff --git a/common-user/host/x86_64/safe-syscall.inc.S b/common-user/host/x86_64/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/x86_64/safe-syscall.inc.S
+++ b/common-user/host/x86_64/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
 1:      pop %rbp
         .cfi_def_cfa_offset 8
         .cfi_restore rbp
+        mov %eax, %edi
         jmp safe_syscall_set_errno_tail
         .cfi_endproc

--
2.25.1
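
What the one-line moves above accomplish, expressed in C (a simplified sketch, not the actual QEMU sources; the helper body, the epilogue function, and the success/error split are assumptions made for illustration): the kernel leaves the syscall result in the return register, but the tail-called C helper reads its argument from wherever the host calling convention puts the first parameter, and on i386, x86_64 and MIPS those are different places.

    /* Illustration only: pseudo-C for the tail of safe_syscall_base(). */
    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for QEMU's real helper (simplified signature). */
    static long safe_syscall_set_errno_tail(int value)
    {
        errno = value;
        return -1;
    }

    static long safe_syscall_epilogue(long result)
    {
        /* Linux syscalls report failure as -errno in the range [-4095, -1]. */
        if (result >= -4095L && result < 0) {
            /*
             * A C compiler moves the value into the first-argument slot
             * automatically; the hand-written assembly must do it explicitly:
             *   x86_64: mov %eax, %edi    (result in EAX, first arg in EDI)
             *   i386:   mov %eax, (%esp)  (result in EAX, first arg on the stack)
             *   mips:   move a0, v0       (result in $v0, first arg in $a0)
             * which is exactly what the patch adds before the tail call.
             */
            return safe_syscall_set_errno_tail((int)-result);
        }
        return result;
    }

    int main(void)
    {
        long r = safe_syscall_epilogue(-EINVAL);
        printf("ret=%ld errno=%d\n", r, errno);   /* ret=-1, errno=22 (EINVAL) on Linux */
        return 0;
    }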