The following changes since commit 1316b1ddc8a05e418c8134243f8bff8cccbbccb1:

  Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging (2019-07-12 15:38:22 +0100)

are available in the Git repository at:

  https://github.com/rth7680/qemu.git tags/pull-tcg-20190714

for you to fetch changes up to 52ba13f042714c4086416973fb88e2465e0888a1:

  tcg: Release mmap_lock on translation fault (2019-07-14 12:19:01 +0200)

----------------------------------------------------------------
Fixes for 3 tcg bugs

----------------------------------------------------------------
Richard Henderson (7):
      tcg: Fix constant folding of INDEX_op_extract2_i32
      tcg/aarch64: Fix output of extract2 opcodes
      include/qemu/atomic.h: Add signal_barrier
      tcg: Introduce set/clear_helper_retaddr
      tcg: Remove cpu_ld*_code_ra
      tcg: Remove duplicate #if !defined(CODE_ACCESS)
      tcg: Release mmap_lock on translation fault

 include/exec/cpu_ldst.h                   | 20 ++++
 include/exec/cpu_ldst_useronly_template.h | 40 ++++++++++------
 include/qemu/atomic.h                     | 11 +++++
 accel/tcg/user-exec.c                     | 77 +++++++++++++++++++++----------
 target/arm/helper-a64.c                   |  8 ++--
 target/arm/sve_helper.c                   | 43 +++++++++--------
 tcg/aarch64/tcg-target.inc.c              |  2 +-
 tcg/optimize.c                            |  4 +-
 8 files changed, 139 insertions(+), 66 deletions(-)


The following changes since commit 67e41fe0cfb62e6cdfa659f0155417d17e5274ea:

  Merge tag 'pull-ppc-20220104' of https://github.com/legoater/qemu into staging (2022-01-04 07:23:27 -0800)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20220104

for you to fetch changes up to d7478d4229f0a2b2817a55487e6b17081099fae4:

  common-user: Fix tail calls to safe_syscall_set_errno_tail (2022-01-04 15:41:03 -0800)

----------------------------------------------------------------
Fix for safe_syscall_base.
Fix for folding of vector add/sub.
Fix build on loongarch64 with gcc 8.
Remove decl for qemu_run_machine_init_done_notifiers.

----------------------------------------------------------------
Philippe Mathieu-Daudé (1):
      linux-user: Fix trivial build error on loongarch64 hosts

Richard Henderson (2):
      tcg/optimize: Fix folding of vector ops
      common-user: Fix tail calls to safe_syscall_set_errno_tail

Xiaoyao Li (1):
      sysemu: Cleanup qemu_run_machine_init_done_notifiers()

 include/sysemu/sysemu.h                    |  1 -
 linux-user/host/loongarch64/host-signal.h  |  4 +--
 tcg/optimize.c                             | 49 +++++++++++++++++++++-------
 common-user/host/i386/safe-syscall.inc.S   |  1 +
 common-user/host/mips/safe-syscall.inc.S   |  1 +
 common-user/host/x86_64/safe-syscall.inc.S |  1 +
 6 files changed, 42 insertions(+), 15 deletions(-)
diff view generated by jsdifflib
On a 64-bit host, discard any replications of the 32-bit
sign bit when performing the shift and merge.

Fixes: https://bugs.launchpad.net/bugs/1834496
Tested-by: Christophe Lyon <christophe.lyon@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
             if (opc == INDEX_op_extract2_i64) {
                 tmp = (v1 >> op->args[3]) | (v2 << (64 - op->args[3]));
             } else {
-                tmp = (v1 >> op->args[3]) | (v2 << (32 - op->args[3]));
-                tmp = (int32_t)tmp;
+                tmp = (int32_t)(((uint32_t)v1 >> op->args[3]) |
+                                ((uint32_t)v2 << (32 - op->args[3])));
             }
             tcg_opt_gen_movi(s, op, op->args[0], tmp);
             break;
--
2.17.1
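
As an illustration of the bug, a minimal standalone C sketch (not QEMU
code; the values are made up) shows what goes wrong when v1 holds a
sign-extended 32-bit constant in a 64-bit variable, as TCG does on a
64-bit host:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t v1 = 0xffffffff80000000ull;  /* 0x80000000, sign-extended */
        uint64_t v2 = 1;
        unsigned pos = 4;

        /* Old fold: replicated sign bits are shifted down into the low
         * 32 bits before the merge, corrupting the result. */
        uint32_t buggy = (uint32_t)((v1 >> pos) | (v2 << (32 - pos)));

        /* New fold: truncate both inputs to 32 bits first. */
        uint32_t fixed = ((uint32_t)v1 >> pos) | ((uint32_t)v2 << (32 - pos));

        /* Prints "old f8000000, new 18000000". */
        printf("old %08x, new %08x\n", (unsigned)buggy, (unsigned)fixed);
        return 0;
    }
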

Bitwise operations are easy to fold, because the operation is
identical regardless of element size. But add and sub need
extra element size info that is not currently propagated.

Fixes: 2f9f08ba43d
Cc: qemu-stable@nongnu.org
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/799
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 49 ++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 38 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     CASE_OP_32_64(mul):
         return x * y;

-    CASE_OP_32_64(and):
+    CASE_OP_32_64_VEC(and):
         return x & y;

-    CASE_OP_32_64(or):
+    CASE_OP_32_64_VEC(or):
         return x | y;

-    CASE_OP_32_64(xor):
+    CASE_OP_32_64_VEC(xor):
         return x ^ y;

     case INDEX_op_shl_i32:
@@ -XXX,XX +XXX,XX @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
     case INDEX_op_rotl_i64:
         return rol64(x, y & 63);

-    CASE_OP_32_64(not):
+    CASE_OP_32_64_VEC(not):
         return ~x;

     CASE_OP_32_64(neg):
         return -x;

-    CASE_OP_32_64(andc):
+    CASE_OP_32_64_VEC(andc):
         return x & ~y;

-    CASE_OP_32_64(orc):
+    CASE_OP_32_64_VEC(orc):
         return x | ~y;

     CASE_OP_32_64(eqv):
@@ -XXX,XX +XXX,XX @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }

+static bool fold_commutative(OptContext *ctx, TCGOp *op)
+{
+    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+    return false;
+}
+
 static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 {
     swap_commutative(op->args[0], &op->args[1], &op->args[2]);
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
     return false;
 }

+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_add_vec(OptContext *ctx, TCGOp *op)
+{
+    if (fold_commutative(ctx, op) ||
+        fold_xi_to_x(ctx, op, 0)) {
+        return true;
+    }
+    return false;
+}
+
 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
 {
     if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
     return false;
 }

-static bool fold_sub(OptContext *ctx, TCGOp *op)
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
 {
-    if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0) ||
+    if (fold_xx_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_sub_to_neg(ctx, op)) {
         return true;
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
     return false;
 }

+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
+}
+
 static bool fold_sub2(OptContext *ctx, TCGOp *op)
 {
     return fold_addsub2(ctx, op, false);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
          * Sorted alphabetically by opcode as much as possible.
          */
         switch (opc) {
-        CASE_OP_32_64_VEC(add):
+        CASE_OP_32_64(add):
             done = fold_add(&ctx, op);
             break;
+        case INDEX_op_add_vec:
+            done = fold_add_vec(&ctx, op);
+            break;
         CASE_OP_32_64(add2):
             done = fold_add2(&ctx, op);
             break;
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(sextract):
             done = fold_sextract(&ctx, op);
             break;
-        CASE_OP_32_64_VEC(sub):
+        CASE_OP_32_64(sub):
             done = fold_sub(&ctx, op);
             break;
+        case INDEX_op_sub_vec:
+            done = fold_sub_vec(&ctx, op);
+            break;
         CASE_OP_32_64(sub2):
             done = fold_sub2(&ctx, op);
             break;
--
2.25.1
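
Why add and sub need the element size while bitwise ops do not can be
seen with a standalone sketch (plain C, nothing QEMU-specific): an AND
of two packed constants equals the AND of the whole 64-bit word, but a
single 64-bit add lets carries cross lane boundaries:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Two vector constants, each holding four 16-bit lanes of 0x8000. */
        uint64_t x = 0x8000800080008000ull;
        uint64_t y = 0x8000800080008000ull;

        /* Folding with one 64-bit add lets carries leak between lanes. */
        uint64_t scalar = x + y;

        /* What the vector op means: an independent 16-bit add per lane. */
        uint64_t lanes = 0;
        for (int i = 0; i < 64; i += 16) {
            uint64_t sum = ((x >> i) & 0xffff) + ((y >> i) & 0xffff);
            lanes |= (sum & 0xffff) << i;     /* each lane wraps to 0 */
        }

        printf("scalar %016" PRIx64 "\n", scalar);  /* 0001000100010000 */
        printf("lanes  %016" PRIx64 "\n", lanes);   /* 0000000000000000 */
        return 0;
    }

Bitwise operations never generate carries, which is why the
CASE_OP_32_64_VEC cases above are safe to fold at any element size.
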
diff view generated by jsdifflib
Deleted patch
This patch fixes two problems:
(1) The inputs to the EXTR insn were reversed,
(2) The input constraints use rZ, which means that we need to use
    the REG0 macro in order to supply XZR for a constant 0 input.

Fixes: 464c2969d5d
Reported-by: Peter Maydell <peter.maydell@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.inc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,

     case INDEX_op_extract2_i64:
     case INDEX_op_extract2_i32:
-        tcg_out_extr(s, ext, a0, a1, a2, args[3]);
+        tcg_out_extr(s, ext, a0, REG0(2), REG0(1), args[3]);
        break;

     case INDEX_op_add2_i32:
--
2.17.1
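
A small C model of problem (1), based on my reading of the two
definitions (the helper names below are invented): TCG's extract2
dest,a1,a2,pos takes 64 bits starting at bit pos of the concatenation
a2:a1, with a1 as the low half, while EXTR Xd,Xn,Xm,#lsb takes them
from Xn:Xm with Xm as the low half — so a1 must land in the Xm
position and a2 in the Xn position, as the corrected call does:

    #include <assert.h>
    #include <stdint.h>

    /* dest = 64 bits of a2:a1 starting at bit pos (a1 is the low half). */
    static uint64_t tcg_extract2_i64(uint64_t a1, uint64_t a2, unsigned pos)
    {
        return (a1 >> pos) | (a2 << (64 - pos));   /* pos must be 1..63 */
    }

    /* EXTR Xd, Xn, Xm, #lsb: 64 bits of Xn:Xm starting at bit lsb. */
    static uint64_t aarch64_extr(uint64_t xn, uint64_t xm, unsigned lsb)
    {
        return (xm >> lsb) | (xn << (64 - lsb));   /* lsb must be 1..63 */
    }

    int main(void)
    {
        uint64_t a1 = 0x1111111111111111ull, a2 = 0x2222222222222222ull;

        /* Correct mapping: a2 -> Xn, a1 -> Xm. */
        assert(tcg_extract2_i64(a1, a2, 8) == aarch64_extr(a2, a1, 8));
        /* The reversed operands computed something else entirely. */
        assert(tcg_extract2_i64(a1, a2, 8) != aarch64_extr(a1, a2, 8));
        return 0;
    }
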
diff view generated by jsdifflib
Deleted patch
We have some potential race conditions vs our user-exec signal
handler that will be solved with this barrier.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/atomic.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -XXX,XX +XXX,XX @@
 #define smp_read_barrier_depends()   barrier()
 #endif

+/*
+ * A signal barrier forces all pending local memory ops to be observed before
+ * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
+ * the same as barrier(), but since we have the correct builtin, use it.
+ */
+#define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)
+
 /* Sanity check that the size of an atomic operation isn't "overly large".
  * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
  * want to use them because we ought not need them, and this lets us do a
@@ -XXX,XX +XXX,XX @@
 #define smp_read_barrier_depends()   barrier()
 #endif

+#ifndef signal_barrier
+#define signal_barrier()   barrier()
+#endif
+
 /* These will only be atomic if the processor does the fetch or store
  * in a single issue memory operation
  */
--
2.17.1
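
The point of the new barrier is ordering between a thread's own stores
and a synchronous signal it then takes; no CPU-level fence is required
because delivery is to the same thread. A minimal standalone
demonstration of the pattern (not QEMU code; the flag name is
invented):

    #include <signal.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    static __thread uintptr_t fault_context;  /* stands in for helper_retaddr */

    static void handler(int sig)
    {
        /* The fence in main() guarantees this handler observes the
         * store that preceded the faulting access in program order. */
        if (fault_context == 0x42) {
            write(STDOUT_FILENO, "context visible in handler\n", 27);
        }
        _exit(0);
    }

    int main(void)
    {
        signal(SIGSEGV, handler);

        fault_context = 0x42;
        /* Compiler-level fence only: keeps the store above from being
         * sunk past the possibly-faulting access below. */
        __atomic_signal_fence(__ATOMIC_SEQ_CST);

        *(volatile int *)8 = 1;   /* deliberately fault */
        return 1;
    }
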
diff view generated by jsdifflib
Deleted patch
At present we have a potential error in that helper_retaddr contains
data for handle_cpu_signal, but we have not ensured that those stores
will be scheduled properly before the operation that may fault.

It might be that these races are not in practice observable, due to
our use of -fno-strict-aliasing, but better safe than sorry.

Adjust all of the setters of helper_retaddr.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h                   | 20 +++++
 include/exec/cpu_ldst_useronly_template.h | 12 +++----
 accel/tcg/user-exec.c                     | 11 +++---
 target/arm/helper-a64.c                   |  8 ++---
 target/arm/sve_helper.c                   | 43 +++++++++--------
 5 files changed, 57 insertions(+), 37 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ typedef target_ulong abi_ptr;

 extern __thread uintptr_t helper_retaddr;

+static inline void set_helper_retaddr(uintptr_t ra)
+{
+    helper_retaddr = ra;
+    /*
+     * Ensure that this write is visible to the SIGSEGV handler that
+     * may be invoked due to a subsequent invalid memory operation.
+     */
+    signal_barrier();
+}
+
+static inline void clear_helper_retaddr(void)
+{
+    /*
+     * Ensure that previous memory operations have succeeded before
+     * removing the data visible to the signal handler.
+     */
+    signal_barrier();
+    helper_retaddr = 0;
+}
+
 /* In user-only mode we provide only the _code and _data accessors. */

 #define MEMSUFFIX _data
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   uintptr_t retaddr)
 {
     RES_TYPE ret;
-    helper_retaddr = retaddr;
+    set_helper_retaddr(retaddr);
     ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
-    helper_retaddr = 0;
+    clear_helper_retaddr();
     return ret;
 }

@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   uintptr_t retaddr)
 {
     int ret;
-    helper_retaddr = retaddr;
+    set_helper_retaddr(retaddr);
     ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
-    helper_retaddr = 0;
+    clear_helper_retaddr();
     return ret;
 }
 #endif
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   RES_TYPE v,
                                                   uintptr_t retaddr)
 {
-    helper_retaddr = retaddr;
+    set_helper_retaddr(retaddr);
     glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
-    helper_retaddr = 0;
+    clear_helper_retaddr();
 }
 #endif

diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
      * currently executing TB was modified and must be exited
      * immediately.  Clear helper_retaddr for next execution.
      */
-    helper_retaddr = 0;
+    clear_helper_retaddr();
     cpu_exit_tb_from_sighandler(cpu, old_set);
     /* NORETURN */

@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
      * an exception.  Undo signal and retaddr state prior to longjmp.
      */
     sigprocmask(SIG_SETMASK, old_set, NULL);
-    helper_retaddr = 0;
+    clear_helper_retaddr();

     cc = CPU_GET_CLASS(cpu);
     access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
     if (unlikely(addr & (size - 1))) {
         cpu_loop_exit_atomic(env_cpu(env), retaddr);
     }
-    helper_retaddr = retaddr;
-    return g2h(addr);
+    void *ret = g2h(addr);
+    set_helper_retaddr(retaddr);
+    return ret;
 }

 /* Macro to call the above, with local variables from the use context.  */
 #define ATOMIC_MMU_DECLS do {} while (0)
 #define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
-#define ATOMIC_MMU_CLEANUP do { helper_retaddr = 0; } while (0)
+#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

 #define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
 #define EXTRA_ARGS
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
     /* ??? Enforce alignment.  */
     uint64_t *haddr = g2h(addr);

-    helper_retaddr = ra;
+    set_helper_retaddr(ra);
     o0 = ldq_le_p(haddr + 0);
     o1 = ldq_le_p(haddr + 1);
     oldv = int128_make128(o0, o1);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
         stq_le_p(haddr + 0, int128_getlo(newv));
         stq_le_p(haddr + 1, int128_gethi(newv));
     }
-    helper_retaddr = 0;
+    clear_helper_retaddr();
 #else
     int mem_idx = cpu_mmu_index(env, false);
     TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
     /* ??? Enforce alignment.  */
     uint64_t *haddr = g2h(addr);

-    helper_retaddr = ra;
+    set_helper_retaddr(ra);
     o1 = ldq_be_p(haddr + 0);
     o0 = ldq_be_p(haddr + 1);
     oldv = int128_make128(o0, o1);
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
         stq_be_p(haddr + 0, int128_gethi(newv));
         stq_be_p(haddr + 1, int128_getlo(newv));
     }
-    helper_retaddr = 0;
+    clear_helper_retaddr();
 #else
     int mem_idx = cpu_mmu_index(env, false);
     TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -XXX,XX +XXX,XX @@ static intptr_t max_for_page(target_ulong base, intptr_t mem_off,
     return MIN(split, mem_max - mem_off) + mem_off;
 }

-static inline void set_helper_retaddr(uintptr_t ra)
-{
-#ifdef CONFIG_USER_ONLY
-    helper_retaddr = ra;
+#ifndef CONFIG_USER_ONLY
+/* These are normally defined only for CONFIG_USER_ONLY in <exec/cpu_ldst.h> */
+static inline void set_helper_retaddr(uintptr_t ra) { }
+static inline void clear_helper_retaddr(void) { }
 #endif
-}

 /*
  * The result of tlb_vaddr_to_host for user-only is just g2h(x),
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
     if (test_host_page(host)) {
         mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
         tcg_debug_assert(mem_off == mem_max);
-        set_helper_retaddr(0);
+        clear_helper_retaddr();
         /* After having taken any fault, zero leading inactive elements. */
         swap_memzero(vd, reg_off);
         return;
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_r(CPUARMState *env, void *vg, const target_ulong addr,
     }
 #endif

-    set_helper_retaddr(0);
+    clear_helper_retaddr();
     memcpy(vd, &scratch, reg_max);
 }

@@ -XXX,XX +XXX,XX @@ static void sve_ld2_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 2 * size;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();

     /* Wait until all exceptions have been raised to write back.  */
     memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -XXX,XX +XXX,XX @@ static void sve_ld3_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 3 * size;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();

     /* Wait until all exceptions have been raised to write back.  */
     memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -XXX,XX +XXX,XX @@ static void sve_ld4_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 4 * size;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();

     /* Wait until all exceptions have been raised to write back.  */
     memcpy(&env->vfp.zregs[rd], &scratch[0], oprsz);
@@ -XXX,XX +XXX,XX @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
     if (test_host_page(host)) {
         mem_off = host_fn(vd, vg, host - mem_off, mem_off, mem_max);
         tcg_debug_assert(mem_off == mem_max);
-        set_helper_retaddr(0);
+        clear_helper_retaddr();
         /* After any fault, zero any leading inactive elements.  */
         swap_memzero(vd, reg_off);
         return;
@@ -XXX,XX +XXX,XX @@ static void sve_ldff1_r(CPUARMState *env, void *vg, const target_ulong addr,
     }
 #endif

-    set_helper_retaddr(0);
+    clear_helper_retaddr();
     record_fault(env, reg_off, reg_max);
 }

@@ -XXX,XX +XXX,XX @@ static void sve_st1_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += msize;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();
 }

 static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -XXX,XX +XXX,XX @@ static void sve_st2_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 2 * msize;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();
 }

 static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -XXX,XX +XXX,XX @@ static void sve_st3_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 3 * msize;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();
 }

 static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
@@ -XXX,XX +XXX,XX @@ static void sve_st4_r(CPUARMState *env, void *vg, target_ulong addr,
             addr += 4 * msize;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();
 }

 #define DO_STN_1(N, NAME, ESIZE) \
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
             i += 4, pg >>= 4;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();

     /* Wait until all exceptions have been raised to write back.  */
     memcpy(vd, &scratch, oprsz);
@@ -XXX,XX +XXX,XX @@ static void sve_ld1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
             tlb_fn(env, &scratch, i * 8, base + (off << scale), oi, ra);
         }
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();

     /* Wait until all exceptions have been raised to write back.  */
     memcpy(vd, &scratch, oprsz * 8);
@@ -XXX,XX +XXX,XX @@ static inline void sve_ldff1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
         tlb_fn(env, vd, reg_off, addr, oi, ra);

         /* The rest of the reads will be non-faulting.  */
-        set_helper_retaddr(0);
+        clear_helper_retaddr();
     }

     /* After any fault, zero the leading predicated false elements.  */
@@ -XXX,XX +XXX,XX @@ static inline void sve_ldff1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
         tlb_fn(env, vd, reg_off, addr, oi, ra);

         /* The rest of the reads will be non-faulting.  */
-        set_helper_retaddr(0);
+        clear_helper_retaddr();
     }

     /* After any fault, zero the leading predicated false elements.  */
@@ -XXX,XX +XXX,XX @@ static void sve_st1_zs(CPUARMState *env, void *vd, void *vg, void *vm,
             i += 4, pg >>= 4;
         } while (i & 15);
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();
 }

 static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
@@ -XXX,XX +XXX,XX @@ static void sve_st1_zd(CPUARMState *env, void *vd, void *vg, void *vm,
             tlb_fn(env, vd, i * 8, base + (off << scale), oi, ra);
         }
     }
-    set_helper_retaddr(0);
+    clear_helper_retaddr();
 }

 #define DO_ST1_ZPZ_S(MEM, OFS) \
--
2.17.1
diff view generated by jsdifflib
Turn helper_retaddr into a multi-state flag that may now also
indicate when we're performing a read on behalf of the translator.
In this case, release the mmap_lock before the longjmp back to
the main cpu loop, and thereby avoid a failing assert therein.

Fixes: https://bugs.launchpad.net/qemu/+bug/1832353
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_useronly_template.h | 20 +++++--
 accel/tcg/user-exec.c                     | 66 ++++++++++++++++-------
 2 files changed, 63 insertions(+), 23 deletions(-)

diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -XXX,XX +XXX,XX @@
 static inline RES_TYPE
 glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    RES_TYPE ret;
+    set_helper_retaddr(1);
+    ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+    clear_helper_retaddr();
+    return ret;
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, false, MO_TE, false));
-#endif
     return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+#endif
 }

 #ifndef CODE_ACCESS
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
 static inline int
 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    int ret;
+    set_helper_retaddr(1);
+    ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+    clear_helper_retaddr();
+    return ret;
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, true, MO_TE, false));
-#endif
     return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+#endif
 }

 #ifndef CODE_ACCESS
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
     CPUState *cpu = current_cpu;
     CPUClass *cc;
     unsigned long address = (unsigned long)info->si_addr;
-    MMUAccessType access_type;
+    MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;

-    /* We must handle PC addresses from two different sources:
-     * a call return address and a signal frame address.
-     *
-     * Within cpu_restore_state_from_tb we assume the former and adjust
-     * the address by -GETPC_ADJ so that the address is within the call
-     * insn so that addr does not accidentally match the beginning of the
-     * next guest insn.
-     *
-     * However, when the PC comes from the signal frame, it points to
-     * the actual faulting host insn and not a call insn.  Subtracting
-     * GETPC_ADJ in that case may accidentally match the previous guest insn.
-     *
-     * So for the later case, adjust forward to compensate for what
-     * will be done later by cpu_restore_state_from_tb.
-     */
-    if (helper_retaddr) {
+    switch (helper_retaddr) {
+    default:
+        /*
+         * Fault during host memory operation within a helper function.
+         * The helper's host return address, saved here, gives us a
+         * pointer into the generated code that will unwind to the
+         * correct guest pc.
+         */
         pc = helper_retaddr;
-    } else {
+        break;
+
+    case 0:
+        /*
+         * Fault during host memory operation within generated code.
+         * (Or, a unrelated bug within qemu, but we can't tell from here).
+         *
+         * We take the host pc from the signal frame.  However, we cannot
+         * use that value directly.  Within cpu_restore_state_from_tb, we
+         * assume PC comes from GETPC(), as used by the helper functions,
+         * so we adjust the address by -GETPC_ADJ to form an address that
+         * is within the call insn, so that the address does not accidentially
+         * match the beginning of the next guest insn.  However, when the
+         * pc comes from the signal frame it points to the actual faulting
+         * host memory insn and not the return from a call insn.
+         *
+         * Therefore, adjust to compensate for what will be done later
+         * by cpu_restore_state_from_tb.
+         */
         pc += GETPC_ADJ;
+        break;
+
+    case 1:
+        /*
+         * Fault during host read for translation, or loosely, "execution".
+         *
+         * The guest pc is already pointing to the start of the TB for which
+         * code is being generated.  If the guest translator manages the
+         * page crossings correctly, this is exactly the correct address
+         * (and if the translator doesn't handle page boundaries correctly
+         * there's little we can do about that here).  Therefore, do not
+         * trigger the unwinder.
+         *
+         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
+         */
+        pc = 0;
+        access_type = MMU_INST_FETCH;
+        mmap_unlock();
+        break;
     }

     /* For synchronous signals we expect to be coming from the vCPU
@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
     clear_helper_retaddr();

     cc = CPU_GET_CLASS(cpu);
-    access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
     cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
     g_assert_not_reached();
 }
--
2.17.1

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

When building using GCC 8.3.0 on loongarch64 (Loongnix) we get:

  In file included from ../linux-user/signal.c:33:
  ../linux-user/host/loongarch64/host-signal.h: In function ‘host_signal_write’:
  ../linux-user/host/loongarch64/host-signal.h:57:9: error: a label can only be part of a statement and a declaration is not a statement
           uint32_t sel = (insn >> 15) & 0b11111111111;
           ^~~~~~~~

We don't use the 'sel' variable more than once, so drop it.

Meson output for the record:

  Host machine cpu family: loongarch64
  Host machine cpu: loongarch64
  C compiler for the host machine: cc (gcc 8.3.0 "cc (Loongnix 8.3.0-6.lnd.vec.27) 8.3.0")
  C linker for the host machine: cc ld.bfd 2.31.1-system

Fixes: ad812c3bd65 ("linux-user: Implement CPU-specific signal handler for loongarch64 hosts")
Reported-by: Song Gao <gaosong@loongson.cn>
Suggested-by: Song Gao <gaosong@loongson.cn>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220104215027.2180972-1-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/host/loongarch64/host-signal.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/linux-user/host/loongarch64/host-signal.h b/linux-user/host/loongarch64/host-signal.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/host/loongarch64/host-signal.h
+++ b/linux-user/host/loongarch64/host-signal.h
@@ -XXX,XX +XXX,XX @@ static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
         }
         break;
     case 0b001110: /* indexed, atomic, bounds-checking memory operations */
-        uint32_t sel = (insn >> 15) & 0b11111111111;
-
-        switch (sel) {
+        switch ((insn >> 15) & 0b11111111111) {
         case 0b00000100000: /* stx.b */
         case 0b00000101000: /* stx.h */
         case 0b00000110000: /* stx.w */
--
2.25.1
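
The diagnostic is a plain C rule, easy to reproduce apart from QEMU:
before C23, a label must be followed by a statement, and a declaration
is not one. A minimal reproducer that GCC 8 rejects with exactly this
error (and that braces, or folding the expression into the switch as
the patch does, would cure):

    int classify(int insn)
    {
        switch (insn & 3) {
        case 1:
            int sel = insn >> 2;   /* error: a label can only be part of a
                                    * statement and a declaration is not a
                                    * statement */
            return sel;
        default:
            return 0;
        }
    }
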
diff view generated by jsdifflib
These functions are not used, and are not usable in the
context of code generation, because we never have a helper
return address to pass in to them.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_useronly_template.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
     return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
 }

+#ifndef CODE_ACCESS
 static inline RES_TYPE
 glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   abi_ptr ptr,
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     clear_helper_retaddr();
     return ret;
 }
+#endif

 #if DATA_SIZE <= 2
 static inline int
@@ -XXX,XX +XXX,XX @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
     return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
 }

+#ifndef CODE_ACCESS
 static inline int
 glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   abi_ptr ptr,
@@ -XXX,XX +XXX,XX @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
     clear_helper_retaddr();
     return ret;
 }
-#endif
+#endif /* CODE_ACCESS */
+#endif /* DATA_SIZE <= 2 */

 #ifndef CODE_ACCESS
 static inline void
--
2.17.1


From: Xiaoyao Li <xiaoyao.li@intel.com>

Remove qemu_run_machine_init_done_notifiers(), since it has no
implementation and no users.

Fixes: f66dc8737c9 ("vl: move all generic initialization out of vl.c")
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20220104024136.1433545-1-xiaoyao.li@intel.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/sysemu/sysemu.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -XXX,XX +XXX,XX @@ extern bool qemu_uuid_set;
 void qemu_add_exit_notifier(Notifier *notify);
 void qemu_remove_exit_notifier(Notifier *notify);

-void qemu_run_machine_init_done_notifiers(void);
 void qemu_add_machine_init_done_notifier(Notifier *notify);
 void qemu_remove_machine_init_done_notifier(Notifier *notify);

--
2.25.1
diff view generated by jsdifflib
This code block is already surrounded by #ifndef CODE_ACCESS.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst_useronly_template.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -XXX,XX +XXX,XX @@ static inline void
 glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
                                       RES_TYPE v)
 {
-#if !defined(CODE_ACCESS)
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, false, MO_TE, true));
-#endif
     glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
 }
--
2.17.1

For the ABIs in which the syscall return register is not
also the first function argument register, move the errno
value into the correct place.

Fixes: a3310c0397e2 ("linux-user: Move syscall error detection into safe_syscall_base")
Reported-by: Laurent Vivier <laurent@vivier.eu>
Tested-by: Laurent Vivier <laurent@vivier.eu>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220104190454.542225-1-richard.henderson@linaro.org>
---
 common-user/host/i386/safe-syscall.inc.S   | 1 +
 common-user/host/mips/safe-syscall.inc.S   | 1 +
 common-user/host/x86_64/safe-syscall.inc.S | 1 +
 3 files changed, 3 insertions(+)

diff --git a/common-user/host/i386/safe-syscall.inc.S b/common-user/host/i386/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/i386/safe-syscall.inc.S
+++ b/common-user/host/i386/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
        pop     %ebp
        .cfi_adjust_cfa_offset -4
        .cfi_restore ebp
+       mov     %eax, (%esp)
        jmp     safe_syscall_set_errno_tail

        .cfi_endproc
diff --git a/common-user/host/mips/safe-syscall.inc.S b/common-user/host/mips/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/mips/safe-syscall.inc.S
+++ b/common-user/host/mips/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
 1:     USE_ALT_CP(t0)
        SETUP_GPX(t1)
        SETUP_GPX64(t0, t1)
+       move    a0, v0
        PTR_LA  t9, safe_syscall_set_errno_tail
        jr      t9

diff --git a/common-user/host/x86_64/safe-syscall.inc.S b/common-user/host/x86_64/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/x86_64/safe-syscall.inc.S
+++ b/common-user/host/x86_64/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
 1:     pop     %rbp
        .cfi_def_cfa_offset 8
        .cfi_restore rbp
+       mov     %eax, %edi
        jmp     safe_syscall_set_errno_tail

        .cfi_endproc
--
2.25.1
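
For reference, a hypothetical C rendering of the error path that these
stubs implement as a tail jump (the wrapper name is invented;
safe_syscall_set_errno_tail is the real routine, which by its name
stores its argument to errno): the errno value arrives in the syscall
return register (%eax on x86, v0 on mips), but the callee expects it
as the first argument (top of stack on i386, a0 on mips, %edi on
x86_64), hence the one-instruction moves above:

    extern long safe_syscall_set_errno_tail(int err);

    /* Invented name: C equivalent of the assembly error path. */
    static long syscall_error_path(int err)
    {
        /* Passing err explicitly here is the register move the patch
         * adds; the call itself compiles to a tail jump, as in the
         * assembly stubs. */
        return safe_syscall_set_errno_tail(err);
    }
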
diff view generated by jsdifflib