The following changes since commit 6587b0c1331d427b0939c37e763842550ed581db:

  Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2021-10-15' into staging (2021-10-15 14:16:28 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211016

for you to fetch changes up to 995b87dedc78b0467f5f18bbc3546072ba97516a:

  Revert "cpu: Move cpu_common_props to hw/core/cpu.c" (2021-10-15 16:39:15 -0700)

----------------------------------------------------------------
Move gdb singlestep to generic code
Fix cpu_common_props

----------------------------------------------------------------
Richard Henderson (24):
      accel/tcg: Handle gdb singlestep in cpu_tb_exec
      target/alpha: Drop checks for singlestep_enabled
      target/avr: Drop checks for singlestep_enabled
      target/cris: Drop checks for singlestep_enabled
      target/hexagon: Drop checks for singlestep_enabled
      target/arm: Drop checks for singlestep_enabled
      target/hppa: Drop checks for singlestep_enabled
      target/i386: Check CF_NO_GOTO_TB for dc->jmp_opt
      target/i386: Drop check for singlestep_enabled
      target/m68k: Drop checks for singlestep_enabled
      target/microblaze: Check CF_NO_GOTO_TB for DISAS_JUMP
      target/microblaze: Drop checks for singlestep_enabled
      target/mips: Fix single stepping
      target/mips: Drop exit checks for singlestep_enabled
      target/openrisc: Drop checks for singlestep_enabled
      target/ppc: Drop exit checks for singlestep_enabled
      target/riscv: Remove dead code after exception
      target/riscv: Remove exit_tb and lookup_and_goto_ptr
      target/rx: Drop checks for singlestep_enabled
      target/s390x: Drop check for singlestep_enabled
      target/sh4: Drop check for singlestep_enabled
      target/tricore: Drop check for singlestep_enabled
      target/xtensa: Drop check for singlestep_enabled
      Revert "cpu: Move cpu_common_props to hw/core/cpu.c"

 include/hw/core/cpu.h                          |  1 +
 target/i386/helper.h                           |  1 -
 target/rx/helper.h                             |  1 -
 target/sh4/helper.h                            |  1 -
 target/tricore/helper.h                        |  1 -
 accel/tcg/cpu-exec.c                           | 11 ++++
 cpu.c                                          | 21 ++++
 hw/core/cpu-common.c                           | 17 +-----
 target/alpha/translate.c                       | 13 ++---
 target/arm/translate-a64.c                     | 10 +---
 target/arm/translate.c                         | 36 +++----------
 target/avr/translate.c                         | 19 ++-----
 target/cris/translate.c                        | 16 ------
 target/hexagon/translate.c                     | 12 +----
 target/hppa/translate.c                        | 17 ++----
 target/i386/tcg/misc_helper.c                  |  8 ---
 target/i386/tcg/translate.c                    |  9 ++--
 target/m68k/translate.c                        | 44 ++++-----------
 target/microblaze/translate.c                  | 18 ++-----
 target/mips/tcg/translate.c                    | 75 ++++++++++++--------------
 target/openrisc/translate.c                    | 18 ++-----
 target/ppc/translate.c                         | 38 +++----------
 target/riscv/translate.c                       | 27 +---------
 target/rx/op_helper.c                          |  8 ---
 target/rx/translate.c                          | 12 +----
 target/s390x/tcg/translate.c                   |  8 +--
 target/sh4/op_helper.c                         |  5 --
 target/sh4/translate.c                         | 14 ++---
 target/tricore/op_helper.c                     |  7 ---
 target/tricore/translate.c                     | 14 +----
 target/xtensa/translate.c                      | 25 +++------
 target/riscv/insn_trans/trans_privileged.c.inc | 10 ++--
 target/riscv/insn_trans/trans_rvi.c.inc        |  8 ++-
 target/riscv/insn_trans/trans_rvv.c.inc        |  2 +-
 34 files changed, 141 insertions(+), 386 deletions(-)
accel/tcg: Handle gdb singlestep in cpu_tb_exec

Currently the change in cpu_tb_exec is masked by the debug exception
being raised by the translators.  But this allows us to remove that code.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
             cc->set_pc(cpu, last_tb->pc);
         }
     }
+
+    /*
+     * If gdb single-step, and we haven't raised another exception,
+     * raise a debug exception.  Single-step with another exception
+     * is handled in cpu_handle_exception.
+     */
+    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
+        cpu->exception_index = EXCP_DEBUG;
+        cpu_loop_exit(cpu);
+    }
+
     return last_tb;
 }
--
2.25.1
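Every per-target conversion in this series has the same shape. A rough
before/after sketch of a target's tb_stop hook (illustrative only; each
target uses its own helper names, as the individual patches below show):

    /* Before: each translator special-cased gdb single-step at TB exit. */
    if (dc->base.singlestep_enabled) {
        gen_raise_exception(EXCP_DEBUG);    /* per-target debug helper */
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }

    /* After: emit only the normal TB exit; cpu_tb_exec (above) raises
     * EXCP_DEBUG once the TB returns to the main loop. */
    tcg_gen_lookup_and_goto_ptr();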
target/alpha: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/translate.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
         /* FALLTHRU */
     case DISAS_PC_UPDATED:
-        if (!ctx->base.singlestep_enabled) {
-            tcg_gen_lookup_and_goto_ptr();
-            break;
-        }
-        /* FALLTHRU */
+        tcg_gen_lookup_and_goto_ptr();
+        break;
     case DISAS_PC_UPDATED_NOCHAIN:
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG, 0);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     default:
         g_assert_not_reached();
--
2.25.1
target/avr: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Tested-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/translate.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(tb, n);
     } else {
         tcg_gen_movi_i32(cpu_pc, dest);
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
     ctx->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         tcg_gen_movi_tl(cpu_pc, ctx->npc);
         /* fall through */
     case DISAS_LOOKUP:
-        if (!ctx->base.singlestep_enabled) {
-            tcg_gen_lookup_and_goto_ptr();
-            break;
-        }
-        /* fall through */
+        tcg_gen_lookup_and_goto_ptr();
+        break;
     case DISAS_EXIT:
-        if (ctx->base.singlestep_enabled) {
-            gen_helper_debug(cpu_env);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     default:
         g_assert_not_reached();
--
2.25.1
target/cris: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/translate.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         }
     }
 
-    if (unlikely(dc->base.singlestep_enabled)) {
-        switch (is_jmp) {
-        case DISAS_TOO_MANY:
-        case DISAS_UPDATE_NEXT:
-            tcg_gen_movi_tl(env_pc, npc);
-            /* fall through */
-        case DISAS_JUMP:
-        case DISAS_UPDATE:
-            t_gen_raise_exception(EXCP_DEBUG);
-            return;
-        default:
-            break;
-        }
-        g_assert_not_reached();
-    }
-
     switch (is_jmp) {
     case DISAS_TOO_MANY:
         gen_goto_tb(dc, 0, npc);
--
2.25.1
target/hexagon: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/translate.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_end_tb(DisasContext *ctx)
 {
     gen_exec_counters(ctx);
     tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
-    if (ctx->base.singlestep_enabled) {
-        gen_exception_raw(EXCP_DEBUG);
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -XXX,XX +XXX,XX @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     case DISAS_TOO_MANY:
         gen_exec_counters(ctx);
         tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
-        if (ctx->base.singlestep_enabled) {
-            gen_exception_raw(EXCP_DEBUG);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
         break;
     case DISAS_NORETURN:
         break;
--
2.25.1
target/arm: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-a64.c | 10 ++--------
 target/arm/translate.c     | 36 ++++++------------------------------
 2 files changed, 8 insertions(+), 38 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
         gen_a64_set_pc_im(dest);
         if (s->ss_active) {
             gen_step_complete_exception(s);
-        } else if (s->base.singlestep_enabled) {
-            gen_exception_internal(EXCP_DEBUG);
         } else {
             tcg_gen_lookup_and_goto_ptr();
             s->base.is_jmp = DISAS_NORETURN;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
+    if (unlikely(dc->ss_active)) {
         /* Note that this means single stepping WFI doesn't halt the CPU.
          * For conditional branch insns this is harmless unreachable code as
          * gen_goto_tb() has already handled emitting the debug exception
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
             /* fall through */
         case DISAS_EXIT:
         case DISAS_JUMP:
-            if (dc->base.singlestep_enabled) {
-                gen_exception_internal(EXCP_DEBUG);
-            } else {
-                gen_step_complete_exception(dc);
-            }
+            gen_step_complete_exception(dc);
             break;
         case DISAS_NORETURN:
             break;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
     tcg_temp_free_i32(tcg_excp);
 }
 
-static void gen_step_complete_exception(DisasContext *s)
+static void gen_singlestep_exception(DisasContext *s)
 {
     /* We just completed step of an insn. Move from Active-not-pending
      * to Active-pending, and then also take the swstep exception.
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_singlestep_exception(DisasContext *s)
-{
-    /* Generate the right kind of exception for singlestep, which is
-     * either the architectural singlestep or EXCP_DEBUG for QEMU's
-     * gdb singlestepping.
-     */
-    if (s->ss_active) {
-        gen_step_complete_exception(s);
-    } else {
-        gen_exception_internal(EXCP_DEBUG);
-    }
-}
-
-static inline bool is_singlestepping(DisasContext *s)
-{
-    /* Return true if we are singlestepping either because of
-     * architectural singlestep or QEMU gdbstub singlestep. This does
-     * not include the command line '-singlestep' mode which is rather
-     * misnamed as it only means "one instruction per TB" and doesn't
-     * affect the code we generate.
-     */
-    return s->base.singlestep_enabled || s->ss_active;
-}
-
 void clear_eci_state(DisasContext *s)
 {
     /*
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx_excret_final_code(DisasContext *s)
     /* Is the new PC value in the magic range indicating exception return? */
     tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
     /* No: end the TB as we would for a DISAS_JMP */
-    if (is_singlestepping(s)) {
+    if (s->ss_active) {
         gen_singlestep_exception(s);
     } else {
         tcg_gen_exit_tb(NULL, 0);
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
 /* Jump, specifying which TB number to use if we gen_goto_tb() */
 static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
 {
-    if (unlikely(is_singlestepping(s))) {
+    if (unlikely(s->ss_active)) {
         /* An indirect jump so that we still trigger the debug exception. */
         gen_set_pc_im(s, dest);
         s->base.is_jmp = DISAS_JUMP;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
 
     /* If architectural single step active, limit to 1. */
-    if (is_singlestepping(dc)) {
+    if (dc->ss_active) {
         dc->base.max_insns = 1;
     }
 
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          * insn codepath itself.
          */
         gen_bx_excret_final_code(dc);
-    } else if (unlikely(is_singlestepping(dc))) {
+    } else if (unlikely(dc->ss_active)) {
         /* Unconditional and "condition passed" instruction codepath. */
         switch (dc->base.is_jmp) {
         case DISAS_SWI:
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         /* "Condition failed" instruction codepath for the branch/trap insn */
         gen_set_label(dc->condlabel);
         gen_set_condexec(dc);
-        if (unlikely(is_singlestepping(dc))) {
+        if (unlikely(dc->ss_active)) {
             gen_set_pc_im(dc, dc->base.pc_next);
             gen_singlestep_exception(dc);
         } else {
--
2.25.1
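The arm conversion is larger than most because two distinct mechanisms
meet here: dc->ss_active is the guest-visible architectural single-step,
which must still raise the guest's software-step exception in generated
code, while dc->base.singlestep_enabled is the gdbstub single-step that
is now raised generically as EXCP_DEBUG in cpu_tb_exec. A condensed
sketch of the resulting split (not the exact upstream code):

    if (dc->ss_active) {
        /* Architectural single-step: raise the guest swstep exception. */
        gen_singlestep_exception(dc);
    } else {
        /* gdb single-step needs nothing here; the main loop handles it. */
        tcg_gen_exit_tb(NULL, 0);
    }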
target/hppa: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hppa/translate.c | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int which,
     } else {
         copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
         copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool do_rfi(DisasContext *ctx, bool rfi_r)
         gen_helper_rfi(cpu_env);
     }
     /* Exit the TB to recognize new interrupts. */
-    if (ctx->base.singlestep_enabled) {
-        gen_excp_1(EXCP_DEBUG);
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
 
     return nullify_end(ctx);
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         nullify_save(ctx);
         /* FALLTHRU */
     case DISAS_IAQ_N_UPDATED:
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG);
-        } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
+        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
             tcg_gen_lookup_and_goto_ptr();
+            break;
         }
         /* FALLTHRU */
     case DISAS_EXIT:
--
2.25.1
target/i386: Check CF_NO_GOTO_TB for dc->jmp_opt

We were using singlestep_enabled as a proxy for whether
translator_use_goto_tb would always return false.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/tcg/translate.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUX86State *env = cpu->env_ptr;
     uint32_t flags = dc->base.tb->flags;
+    uint32_t cflags = tb_cflags(dc->base.tb);
     int cpl = (flags >> HF_CPL_SHIFT) & 3;
     int iopl = (flags >> IOPL_SHIFT) & 3;
 
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
-    dc->jmp_opt = !(dc->base.singlestep_enabled ||
+    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
     /*
      * If jmp_opt, we want to handle each string instruction individually.
      * For icount also disable repz optimization so that each iteration
      * is accounted separately.
      */
-    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
+    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
 
     dc->T0 = tcg_temp_new();
     dc->T1 = tcg_temp_new();
--
2.25.1
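CF_NO_GOTO_TB is the cflags bit the core sets whenever direct TB
chaining must be suppressed, gdbstub single-step being one producer, so
it is the precise condition that jmp_opt was approximating through
singlestep_enabled. For orientation, the generic helper this lines up
with looks roughly like the following (a simplified sketch of
translator_use_goto_tb, not the verbatim upstream code):

    bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
    {
        /* Suppress goto_tb when the TB must not be chained. */
        if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
            return false;
        }
        /* Direct chaining is only valid within the same guest page. */
        return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
    }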
target/i386: Drop check for singlestep_enabled

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/helper.h          | 1 -
 target/i386/tcg/misc_helper.c | 8 --------
 target/i386/tcg/translate.c   | 4 +---
 3 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/target/i386/helper.h b/target/i386/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(syscall, void, env, int)
 DEF_HELPER_2(sysret, void, env, int)
 #endif
 DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
-DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env)
 DEF_HELPER_1(reset_rf, void, env)
 DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
 DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/misc_helper.c
+++ b/target/i386/tcg/misc_helper.c
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend)
     do_pause(env);
 }
 
-void QEMU_NORETURN helper_debug(CPUX86State *env)
-{
-    CPUState *cs = env_cpu(env);
-
-    cs->exception_index = EXCP_DEBUG;
-    cpu_loop_exit(cs);
-}
-
 uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
 {
     if ((env->cr[4] & CR4_PKE_MASK) == 0) {
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
     if (s->base.tb->flags & HF_RF_MASK) {
         gen_helper_reset_rf(cpu_env);
     }
-    if (s->base.singlestep_enabled) {
-        gen_helper_debug(cpu_env);
-    } else if (recheck_tf) {
+    if (recheck_tf) {
         gen_helper_rechecking_single_step(cpu_env);
         tcg_gen_exit_tb(NULL, 0);
     } else if (s->flags & HF_TF_MASK) {
--
2.25.1
target/m68k: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Acked-by: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/m68k/translate.c | 44 +++++++++--------------------------
 1 file changed, 9 insertions(+), 35 deletions(-)

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static void do_writebacks(DisasContext *s)
     }
 }
 
-static bool is_singlestepping(DisasContext *s)
-{
-    /*
-     * Return true if we are singlestepping either because of
-     * architectural singlestep or QEMU gdbstub singlestep. This does
-     * not include the command line '-singlestep' mode which is rather
-     * misnamed as it only means "one instruction per TB" and doesn't
-     * affect the code we generate.
-     */
-    return s->base.singlestep_enabled || s->ss_active;
-}
-
 /* is_jmp field values */
 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
 #define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *s, uint32_t dest, int nr)
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_singlestep_exception(DisasContext *s)
-{
-    /*
-     * Generate the right kind of exception for singlestep, which is
-     * either the architectural singlestep or EXCP_DEBUG for QEMU's
-     * gdb singlestepping.
-     */
-    if (s->ss_active) {
-        gen_raise_exception(EXCP_TRACE);
-    } else {
-        gen_raise_exception(EXCP_DEBUG);
-    }
-}
-
 static inline void gen_addr_fault(DisasContext *s)
 {
     gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
@@ -XXX,XX +XXX,XX @@ static void gen_exit_tb(DisasContext *s)
 /* Generate a jump to an immediate address. */
 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
 {
-    if (unlikely(is_singlestepping(s))) {
+    if (unlikely(s->ss_active)) {
         update_cc_op(s);
         tcg_gen_movi_i32(QREG_PC, dest);
-        gen_singlestep_exception(s);
+        gen_raise_exception(EXCP_TRACE);
     } else if (translator_use_goto_tb(&s->base, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_i32(QREG_PC, dest);
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
 
     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
     /* If architectural single step active, limit to 1 */
-    if (is_singlestepping(dc)) {
+    if (dc->ss_active) {
         dc->base.max_insns = 1;
     }
 }
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         break;
     case DISAS_TOO_MANY:
         update_cc_op(dc);
-        if (is_singlestepping(dc)) {
+        if (dc->ss_active) {
             tcg_gen_movi_i32(QREG_PC, dc->pc);
-            gen_singlestep_exception(dc);
+            gen_raise_exception(EXCP_TRACE);
         } else {
             gen_jmp_tb(dc, 0, dc->pc);
         }
         break;
     case DISAS_JUMP:
         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
-        if (is_singlestepping(dc)) {
-            gen_singlestep_exception(dc);
+        if (dc->ss_active) {
+            gen_raise_exception(EXCP_TRACE);
         } else {
             tcg_gen_lookup_and_goto_ptr();
         }
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          * We updated CC_OP and PC in gen_exit_tb, but also modified
          * other state that may require returning to the main loop.
          */
-        if (is_singlestepping(dc)) {
-            gen_singlestep_exception(dc);
+        if (dc->ss_active) {
+            gen_raise_exception(EXCP_TRACE);
         } else {
             tcg_gen_exit_tb(NULL, 0);
         }
--
2.25.1
target/microblaze: Check CF_NO_GOTO_TB for DISAS_JUMP

We were using singlestep_enabled as a proxy for whether
translator_use_goto_tb would always return false.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/translate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
         break;
 
     case DISAS_JUMP:
-        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
+        if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
             /* Direct jump. */
             tcg_gen_discard_i32(cpu_btarget);
 
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
             return;
         }
 
-        /* Indirect jump (or direct jump w/ singlestep) */
+        /* Indirect jump (or direct jump w/ goto_tb disabled) */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
 
--
2.25.1
target/microblaze: Drop checks for singlestep_enabled

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/translate.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
 
 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
 {
-    if (dc->base.singlestep_enabled) {
-        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
-        tcg_gen_movi_i32(cpu_pc, dest);
-        gen_helper_raise_exception(cpu_env, tmp);
-        tcg_temp_free_i32(tmp);
-    } else if (translator_use_goto_tb(&dc->base, dest)) {
+    if (translator_use_goto_tb(&dc->base, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_i32(cpu_pc, dest);
         tcg_gen_exit_tb(dc->base.tb, n);
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
         /* Indirect jump (or direct jump w/ goto_tb disabled) */
         tcg_gen_mov_i32(cpu_pc, cpu_btarget);
         tcg_gen_discard_i32(cpu_btarget);
-
-        if (unlikely(cs->singlestep_enabled)) {
-            gen_raise_exception(dc, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
         return;
 
--
2.25.1
target/mips: Fix single stepping

As per an ancient comment in mips_tr_translate_insn about the
expectations of gdb, when restarting the insn in a delay slot
we also re-execute the branch.  Which means that we are
expected to execute two insns in this case.

This has been broken since 8b86d6d2580, where we forced max_insns
to 1 while single-stepping.  This resulted in an exit from the
translator loop after the branch but before the delay slot is
translated.

Increase the max_insns to 2 for this case.  In addition, bypass
the end-of-page check, for when the branch itself ends the page.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c | 25 ++++++++++++++-----------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 |
                                   INSN_LOONGSON3A)) ? MO_UNALN : MO_ALIGN;
 
+    /*
+     * Execute a branch and its delay slot as a single instruction.
+     * This is what GDB expects and is consistent with what the
+     * hardware does (e.g. if a delay slot instruction faults, the
+     * reported PC is the PC of the branch).
+     */
+    if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+        ctx->base.max_insns = 2;
+    }
+
     LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
               ctx->hflags);
 }
@@ -XXX,XX +XXX,XX @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     if (ctx->base.is_jmp != DISAS_NEXT) {
         return;
     }
+
     /*
-     * Execute a branch and its delay slot as a single instruction.
-     * This is what GDB expects and is consistent with what the
-     * hardware does (e.g. if a delay slot instruction faults, the
-     * reported PC is the PC of the branch).
+     * End the TB on (most) page crossings.
+     * See mips_tr_init_disas_context about single-stepping a branch
+     * together with its delay slot.
      */
-    if (ctx->base.singlestep_enabled &&
-        (ctx->hflags & MIPS_HFLAG_BMASK) == 0) {
-        ctx->base.is_jmp = DISAS_TOO_MANY;
-    }
-    if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) {
+    if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
+        && !ctx->base.singlestep_enabled) {
         ctx->base.is_jmp = DISAS_TOO_MANY;
     }
 }
--
2.25.1
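A concrete illustration of the gdb expectation (addresses and
instructions made up for the example):

    0x1000:  beq   $t0, $t1, target      # branch
    0x1004:  addiu $v0, $v0, 1           # delay slot

A single gdb step that covers the branch is expected to execute both
instructions, stopping at the branch target or at 0x1008, never between
the two; likewise, if the delay-slot instruction faults, the reported PC
is 0x1000.  Allowing the stepped TB room for two instructions restores
that behavior.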
target/mips: Drop exit checks for singlestep_enabled

GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c | 50 +++++++++++++------------------------
 1 file changed, 18 insertions(+), 32 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         gen_save_pc(dest);
-        if (ctx->base.singlestep_enabled) {
-            save_cpu_state(ctx, 0);
-            gen_helper_raise_exception_debug(cpu_env);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_branch(DisasContext *ctx, int insn_bytes)
         } else {
             tcg_gen_mov_tl(cpu_PC, btarget);
         }
-        if (ctx->base.singlestep_enabled) {
-            save_cpu_state(ctx, 0);
-            gen_helper_raise_exception_debug(cpu_env);
-        }
         tcg_gen_lookup_and_goto_ptr();
         break;
     default:
@@ -XXX,XX +XXX,XX @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-    if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
-        save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
-        gen_helper_raise_exception_debug(cpu_env);
-    } else {
-        switch (ctx->base.is_jmp) {
-        case DISAS_STOP:
-            gen_save_pc(ctx->base.pc_next);
-            tcg_gen_lookup_and_goto_ptr();
-            break;
-        case DISAS_NEXT:
-        case DISAS_TOO_MANY:
-            save_cpu_state(ctx, 0);
-            gen_goto_tb(ctx, 0, ctx->base.pc_next);
-            break;
-        case DISAS_EXIT:
-            tcg_gen_exit_tb(NULL, 0);
-            break;
-        case DISAS_NORETURN:
-            break;
-        default:
-            g_assert_not_reached();
-        }
+    switch (ctx->base.is_jmp) {
+    case DISAS_STOP:
+        gen_save_pc(ctx->base.pc_next);
+        tcg_gen_lookup_and_goto_ptr();
+        break;
+    case DISAS_NEXT:
+    case DISAS_TOO_MANY:
+        save_cpu_state(ctx, 0);
+        gen_goto_tb(ctx, 0, ctx->base.pc_next);
+        break;
+    case DISAS_EXIT:
+        tcg_gen_exit_tb(NULL, 0);
+        break;
+    case DISAS_NORETURN:
+        break;
+    default:
+        g_assert_not_reached();
     }
 }
 
--
2.25.1
1
Implement via expansion, so don't actually set TCG_TARGET_HAS_roti_vec.
1
GDB single-stepping is now handled generically.
2
For NEON, this is shift-right followed by shift-left-and-insert.
3
2
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/arm/tcg-target-con-set.h | 1 +
6
target/openrisc/translate.c | 18 +++---------------
8
tcg/arm/tcg-target.opc.h | 1 +
7
1 file changed, 3 insertions(+), 15 deletions(-)
9
tcg/arm/tcg-target.c.inc | 15 +++++++++++++++
10
3 files changed, 17 insertions(+)
11
8
12
diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
9
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/arm/tcg-target-con-set.h
11
--- a/target/openrisc/translate.c
15
+++ b/tcg/arm/tcg-target-con-set.h
12
+++ b/target/openrisc/translate.c
16
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIK)
13
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
17
C_O1_I2(r, r, rIN)
14
/* The jump destination is indirect/computed; use jmp_pc. */
18
C_O1_I2(r, r, ri)
15
tcg_gen_mov_tl(cpu_pc, jmp_pc);
19
C_O1_I2(r, rZ, rZ)
16
tcg_gen_discard_tl(jmp_pc);
20
+C_O1_I2(w, 0, w)
17
- if (unlikely(dc->base.singlestep_enabled)) {
21
C_O1_I2(w, w, w)
18
- gen_exception(dc, EXCP_DEBUG);
22
C_O1_I2(w, w, wO)
19
- } else {
23
C_O1_I2(w, w, wV)
20
- tcg_gen_lookup_and_goto_ptr();
24
diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h
21
- }
25
index XXXXXXX..XXXXXXX 100644
22
+ tcg_gen_lookup_and_goto_ptr();
26
--- a/tcg/arm/tcg-target.opc.h
23
break;
27
+++ b/tcg/arm/tcg-target.opc.h
24
}
28
@@ -XXX,XX +XXX,XX @@
25
/* The jump destination is direct; use jmp_pc_imm.
29
* consider these to be UNSPEC with names.
26
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
30
*/
27
break;
31
28
}
32
+DEF(arm_sli_vec, 1, 2, 1, IMPLVEC)
29
tcg_gen_movi_tl(cpu_pc, jmp_dest);
33
DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC)
30
- if (unlikely(dc->base.singlestep_enabled)) {
34
DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC)
31
- gen_exception(dc, EXCP_DEBUG);
35
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
32
- } else {
36
index XXXXXXX..XXXXXXX 100644
33
- tcg_gen_lookup_and_goto_ptr();
37
--- a/tcg/arm/tcg-target.c.inc
34
- }
38
+++ b/tcg/arm/tcg-target.c.inc
35
+ tcg_gen_lookup_and_goto_ptr();
39
@@ -XXX,XX +XXX,XX @@ typedef enum {
40
INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */
41
INSN_VSARI = 0xf2800010, /* VSHR.S */
42
INSN_VSHRI = 0xf3800010, /* VSHR.U */
43
+ INSN_VSLI = 0xf3800510,
44
INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */
45
INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */
46
47
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
48
case INDEX_op_arm_sshl_vec:
49
case INDEX_op_arm_ushl_vec:
50
return C_O1_I2(w, w, w);
51
+ case INDEX_op_arm_sli_vec:
52
+ return C_O1_I2(w, 0, w);
53
case INDEX_op_or_vec:
54
case INDEX_op_andc_vec:
55
return C_O1_I2(w, w, wO);
56
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
57
case INDEX_op_sari_vec:
58
tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
59
return;
60
+ case INDEX_op_arm_sli_vec:
61
+ tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
62
+ return;
63
64
case INDEX_op_andc_vec:
65
if (!const_args[2]) {
66
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
67
case INDEX_op_shlv_vec:
68
case INDEX_op_shrv_vec:
69
case INDEX_op_sarv_vec:
70
+ case INDEX_op_rotli_vec:
71
return -1;
72
default:
73
return 0;
74
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
75
tcg_temp_free_vec(t1);
76
break;
36
break;
77
37
78
+ case INDEX_op_rotli_vec:
38
case DISAS_EXIT:
79
+ t1 = tcg_temp_new_vec(type);
39
- if (unlikely(dc->base.singlestep_enabled)) {
80
+ tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
40
- gen_exception(dc, EXCP_DEBUG);
81
+ vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
41
- } else {
82
+ tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
42
- tcg_gen_exit_tb(NULL, 0);
83
+ tcg_temp_free_vec(t1);
43
- }
84
+ break;
44
+ tcg_gen_exit_tb(NULL, 0);
85
+
45
break;
86
default:
46
default:
87
g_assert_not_reached();
47
g_assert_not_reached();
88
}
89
--
2.25.1
This is saturating add and subtract, signed and unsigned.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.h | 2 +-
 tcg/arm/tcg-target.c.inc | 24 ++++++++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

GDB single-stepping is now handled generically.
Reuse gen_debug_exception to handle architectural debug exceptions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/translate.c | 38 ++++++++------------------------------
 1 file changed, 8 insertions(+), 30 deletions(-)
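For readers skimming the hunks below, a scalar reminder of the semantics
that VQADD provides (a minimal sketch, not code from the patch): the
result is clamped to the representable range instead of wrapping.

    #include <stdint.h>

    /* Illustrative only: signed 8-bit saturating add, the scalar
     * analogue of VQADD.S8. */
    static int8_t qadd8(int8_t a, int8_t b)
    {
        int sum = a + b;          /* compute in wider precision */
        if (sum > INT8_MAX) {
            return INT8_MAX;      /* clamp high instead of wrapping */
        }
        if (sum < INT8_MIN) {
            return INT8_MIN;      /* clamp low */
        }
        return (int8_t)sum;
    }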
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
9
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.h
11
--- a/target/ppc/translate.c
13
+++ b/tcg/arm/tcg-target.h
12
+++ b/target/ppc/translate.c
14
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
13
@@ -XXX,XX +XXX,XX @@
15
#define TCG_TARGET_HAS_shs_vec 0
14
16
#define TCG_TARGET_HAS_shv_vec 0
15
#define CPU_SINGLE_STEP 0x1
17
#define TCG_TARGET_HAS_mul_vec 1
16
#define CPU_BRANCH_STEP 0x2
18
-#define TCG_TARGET_HAS_sat_vec 0
17
-#define GDBSTUB_SINGLE_STEP 0x4
19
+#define TCG_TARGET_HAS_sat_vec 1
18
20
#define TCG_TARGET_HAS_minmax_vec 0
19
/* Include definitions for instructions classes and implementations flags */
21
#define TCG_TARGET_HAS_bitsel_vec 0
20
/* #define PPC_DEBUG_DISAS */
22
#define TCG_TARGET_HAS_cmpsel_vec 0
21
@@ -XXX,XX +XXX,XX @@ static uint32_t gen_prep_dbgex(DisasContext *ctx)
23
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
22
24
index XXXXXXX..XXXXXXX 100644
23
static void gen_debug_exception(DisasContext *ctx)
25
--- a/tcg/arm/tcg-target.c.inc
24
{
26
+++ b/tcg/arm/tcg-target.c.inc
25
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
27
@@ -XXX,XX +XXX,XX @@ typedef enum {
26
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
28
INSN_VORR = 0xf2200110,
27
ctx->base.is_jmp = DISAS_NORETURN;
29
INSN_VSUB = 0xf3000800,
28
}
30
INSN_VMUL = 0xf2000910,
29
31
+ INSN_VQADD = 0xf2000010,
30
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
32
+ INSN_VQADD_U = 0xf3000010,
31
33
+ INSN_VQSUB = 0xf2000210,
32
static void gen_lookup_and_goto_ptr(DisasContext *ctx)
34
+ INSN_VQSUB_U = 0xf3000210,
33
{
35
34
- int sse = ctx->singlestep_enabled;
36
INSN_VABS = 0xf3b10300,
35
- if (unlikely(sse)) {
37
INSN_VMVN = 0xf3b00580,
36
- if (sse & GDBSTUB_SINGLE_STEP) {
38
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
37
- gen_debug_exception(ctx);
39
case INDEX_op_dup2_vec:
38
- } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) {
40
case INDEX_op_add_vec:
39
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
41
case INDEX_op_mul_vec:
40
- } else {
42
+ case INDEX_op_ssadd_vec:
41
- tcg_gen_exit_tb(NULL, 0);
43
+ case INDEX_op_sssub_vec:
42
- }
44
case INDEX_op_sub_vec:
43
+ if (unlikely(ctx->singlestep_enabled)) {
45
+ case INDEX_op_usadd_vec:
44
+ gen_debug_exception(ctx);
46
+ case INDEX_op_ussub_vec:
45
} else {
47
case INDEX_op_xor_vec:
46
tcg_gen_lookup_and_goto_ptr();
48
return C_O1_I2(w, w, w);
47
}
49
case INDEX_op_or_vec:
48
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
49
ctx->singlestep_enabled = 0;
51
case INDEX_op_sub_vec:
50
if ((hflags >> HFLAGS_SE) & 1) {
52
tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
51
ctx->singlestep_enabled |= CPU_SINGLE_STEP;
53
return;
52
+ ctx->base.max_insns = 1;
54
+ case INDEX_op_ssadd_vec:
53
}
55
+ tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
54
if ((hflags >> HFLAGS_BE) & 1) {
55
ctx->singlestep_enabled |= CPU_BRANCH_STEP;
56
}
57
- if (unlikely(ctx->base.singlestep_enabled)) {
58
- ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
59
- }
60
-
61
- if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) {
62
- ctx->base.max_insns = 1;
63
- }
64
}
65
66
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
67
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
68
DisasContext *ctx = container_of(dcbase, DisasContext, base);
69
DisasJumpType is_jmp = ctx->base.is_jmp;
70
target_ulong nip = ctx->base.pc_next;
71
- int sse;
72
73
if (is_jmp == DISAS_NORETURN) {
74
/* We have already exited the TB. */
75
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
76
}
77
78
/* Honor single stepping. */
79
- sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP);
80
- if (unlikely(sse)) {
81
+ if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
82
+ && (nip <= 0x100 || nip > 0xf00)) {
83
switch (is_jmp) {
84
case DISAS_TOO_MANY:
85
case DISAS_EXIT_UPDATE:
86
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
87
g_assert_not_reached();
88
}
89
90
- if (sse & GDBSTUB_SINGLE_STEP) {
91
- gen_debug_exception(ctx);
92
- return;
93
- }
94
- /* else CPU_SINGLE_STEP... */
95
- if (nip <= 0x100 || nip > 0xf00) {
96
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
97
- return;
98
- }
99
+ gen_debug_exception(ctx);
56
+ return;
100
+ return;
57
+ case INDEX_op_sssub_vec:
101
}
58
+ tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
102
59
+ return;
103
switch (is_jmp) {
60
+ case INDEX_op_usadd_vec:
61
+ tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
62
+ return;
63
+ case INDEX_op_ussub_vec:
64
+ tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
65
+ return;
66
case INDEX_op_xor_vec:
67
tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
68
return;
69
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
70
case INDEX_op_shli_vec:
71
case INDEX_op_shri_vec:
72
case INDEX_op_sari_vec:
73
+ case INDEX_op_ssadd_vec:
74
+ case INDEX_op_sssub_vec:
75
+ case INDEX_op_usadd_vec:
76
+ case INDEX_op_ussub_vec:
77
return 1;
78
case INDEX_op_abs_vec:
79
case INDEX_op_cmp_vec:
80
--
2.25.1
The three vector shift-by-vector operations are all implemented via
expansion. Therefore do not actually set TCG_TARGET_HAS_shv_vec,
as none of shlv_vec, shrv_vec or sarv_vec can appear in the
instruction stream, and so they also do not appear in tcg_target_op_def.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.opc.h | 3 ++
 tcg/arm/tcg-target.c.inc | 61 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+), 1 deletion(-)

We have already set DISAS_NORETURN in generate_exception,
which makes the exit_tb unreachable.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/insn_trans/trans_privileged.c.inc | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)
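The expansion relies on VSHL (register) interpreting each lane's shift
count as signed, so that right shifts become left shifts by a negated
count (the tcg_gen_neg_vec call in the expansion below). A scalar sketch
of that convention, illustrative only:

    #include <stdint.h>

    /* Illustrative only: NEON-style register shift with a signed count. */
    static int32_t neon_sshl32(int32_t data, int count)
    {
        if (count >= 0) {
            /* shlv_vec maps directly onto the positive case */
            return (int32_t)((uint32_t)data << count);
        }
        /* sarv_vec: negate the count, then shift left by it */
        return data >> -count;
    }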
diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target.opc.h
10
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/arm/tcg-target.opc.h
12
--- a/target/riscv/insn_trans/trans_privileged.c.inc
16
+++ b/tcg/arm/tcg-target.opc.h
13
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
17
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
18
* emitted by tcg_expand_vec_op. For those familiar with GCC internals,
15
{
19
* consider these to be UNSPEC with names.
16
/* always generates U-level ECALL, fixed in do_interrupt handler */
20
*/
17
generate_exception(ctx, RISCV_EXCP_U_ECALL);
21
+
18
- exit_tb(ctx); /* no chaining */
22
+DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC)
19
- ctx->base.is_jmp = DISAS_NORETURN;
23
+DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC)
20
return true;
24
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
21
}
25
index XXXXXXX..XXXXXXX 100644
22
26
--- a/tcg/arm/tcg-target.c.inc
23
@@ -XXX,XX +XXX,XX @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
27
+++ b/tcg/arm/tcg-target.c.inc
24
post = opcode_at(&ctx->base, post_addr);
28
@@ -XXX,XX +XXX,XX @@ typedef enum {
29
INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */
30
INSN_VSARI = 0xf2800010, /* VSHR.S */
31
INSN_VSHRI = 0xf3800010, /* VSHR.U */
32
+ INSN_VSHL_S = 0xf2000400, /* VSHL.S (register) */
33
+ INSN_VSHL_U = 0xf3000400, /* VSHL.U (register) */
34
35
INSN_VBSL = 0xf3100110,
36
INSN_VBIT = 0xf3200110,
37
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
38
case INDEX_op_usadd_vec:
39
case INDEX_op_ussub_vec:
40
case INDEX_op_xor_vec:
41
+ case INDEX_op_arm_sshl_vec:
42
+ case INDEX_op_arm_ushl_vec:
43
return C_O1_I2(w, w, w);
44
case INDEX_op_or_vec:
45
case INDEX_op_andc_vec:
46
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
47
case INDEX_op_xor_vec:
48
tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
49
return;
50
+ case INDEX_op_arm_sshl_vec:
51
+ /*
52
+ * Note that Vm is the data and Vn is the shift count,
53
+ * therefore the arguments appear reversed.
54
+ */
55
+ tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
56
+ return;
57
+ case INDEX_op_arm_ushl_vec:
58
+ /* See above. */
59
+ tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
60
+ return;
61
case INDEX_op_shli_vec:
62
tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
63
return;
64
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
65
case INDEX_op_umax_vec:
66
case INDEX_op_umin_vec:
67
return vece < MO_64;
68
+ case INDEX_op_shlv_vec:
69
+ case INDEX_op_shrv_vec:
70
+ case INDEX_op_sarv_vec:
71
+ return -1;
72
default:
73
return 0;
74
}
25
}
75
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
26
76
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
27
- if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
77
TCGArg a0, ...)
28
+ if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
78
{
29
generate_exception(ctx, RISCV_EXCP_SEMIHOST);
79
- g_assert_not_reached();
30
} else {
80
+ va_list va;
31
generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
81
+ TCGv_vec v0, v1, v2, t1;
32
}
82
+ TCGArg a2;
33
- exit_tb(ctx); /* no chaining */
83
+
34
- ctx->base.is_jmp = DISAS_NORETURN;
84
+ va_start(va, a0);
35
return true;
85
+ v0 = temp_tcgv_vec(arg_temp(a0));
86
+ v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
87
+ a2 = va_arg(va, TCGArg);
88
+ va_end(va);
89
+
90
+ switch (opc) {
91
+ case INDEX_op_shlv_vec:
92
+ /*
93
+ * Merely propagate shlv_vec to arm_ushl_vec.
94
+ * In this way we don't set TCG_TARGET_HAS_shv_vec
95
+ * because everything is done via expansion.
96
+ */
97
+ v2 = temp_tcgv_vec(arg_temp(a2));
98
+ vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
99
+ tcgv_vec_arg(v1), tcgv_vec_arg(v2));
100
+ break;
101
+
102
+ case INDEX_op_shrv_vec:
103
+ case INDEX_op_sarv_vec:
104
+ /* Right shifts are negative left shifts for NEON. */
105
+ v2 = temp_tcgv_vec(arg_temp(a2));
106
+ t1 = tcg_temp_new_vec(type);
107
+ tcg_gen_neg_vec(vece, t1, v2);
108
+ if (opc == INDEX_op_shrv_vec) {
109
+ opc = INDEX_op_arm_ushl_vec;
110
+ } else {
111
+ opc = INDEX_op_arm_sshl_vec;
112
+ }
113
+ vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
114
+ tcgv_vec_arg(v1), tcgv_vec_arg(t1));
115
+ tcg_temp_free_vec(t1);
116
+ break;
117
+
118
+ default:
119
+ g_assert_not_reached();
120
+ }
121
}
36
}
122
37
123
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
124
--
2.25.1
Change the return value to bool, because that's what it should
have been from the start. Pass the ct mask instead of the whole
TCGArgConstraint, as that's the only part that's relevant.

Change the value argument to int64_t. We will need the extra
width for 32-bit hosts wanting to match vector constants.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 5 ++---
 tcg/aarch64/tcg-target.c.inc | 5 +----
 tcg/arm/tcg-target.c.inc | 5 +----
 tcg/i386/tcg-target.c.inc | 4 +---
 tcg/mips/tcg-target.c.inc | 5 +----
 tcg/ppc/tcg-target.c.inc | 4 +---
 tcg/riscv/tcg-target.c.inc | 4 +---
 tcg/s390/tcg-target.c.inc | 5 +----
 tcg/sparc/tcg-target.c.inc | 5 +----
 tcg/tci/tcg-target.c.inc | 6 ++----
 10 files changed, 12 insertions(+), 36 deletions(-)

GDB single-stepping is now handled generically, which means
we don't need to do anything in the wrappers.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/translate.c | 27 +------------------
 .../riscv/insn_trans/trans_privileged.c.inc | 4 +--
 target/riscv/insn_trans/trans_rvi.c.inc | 8 +++---
 target/riscv/insn_trans/trans_rvv.c.inc | 2 +-
 4 files changed, 7 insertions(+), 34 deletions(-)
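To see why the wider value type matters, consider matching a replicated
vector constant on a 32-bit host, where tcg_target_long is only 32 bits
(hypothetical constant, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t dupi = 0x00ff00ff00ff00ffLL;  /* a dupi vector constant */
        long narrow = (long)dupi;             /* 32 bits on a 32-bit host */
        /* There the high half is lost, so distinct constants would
         * compare equal; hence the int64_t parameter. */
        printf("%llx vs %lx\n", (unsigned long long)dupi, (unsigned long)narrow);
        return 0;
    }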
diff --git a/tcg/tcg.c b/tcg/tcg.c
13
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
24
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/tcg.c
15
--- a/target/riscv/translate.c
26
+++ b/tcg/tcg.c
16
+++ b/target/riscv/translate.c
27
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
17
@@ -XXX,XX +XXX,XX @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
28
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
18
ctx->base.is_jmp = DISAS_NORETURN;
29
TCGReg base, intptr_t ofs);
19
}
30
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
20
31
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
21
-static void gen_exception_debug(void)
32
- const TCGArgConstraint *arg_ct);
22
-{
33
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
23
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
34
#ifdef TCG_TARGET_NEED_LDST_LABELS
24
-}
35
static int tcg_out_ldst_finalize(TCGContext *s);
25
-
36
#endif
26
-/* Wrapper around tcg_gen_exit_tb that handles single stepping */
37
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
27
-static void exit_tb(DisasContext *ctx)
38
ts = arg_temp(arg);
28
-{
39
29
- if (ctx->base.singlestep_enabled) {
40
if (ts->val_type == TEMP_VAL_CONST
30
- gen_exception_debug();
41
- && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
31
- } else {
42
+ && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
32
- tcg_gen_exit_tb(NULL, 0);
43
/* constant is OK for instruction */
33
- }
44
const_args[i] = 1;
34
-}
45
new_args[i] = ts->val;
35
-
46
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
36
-/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
47
index XXXXXXX..XXXXXXX 100644
37
-static void lookup_and_goto_ptr(DisasContext *ctx)
48
--- a/tcg/aarch64/tcg-target.c.inc
38
-{
49
+++ b/tcg/aarch64/tcg-target.c.inc
39
- if (ctx->base.singlestep_enabled) {
50
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
40
- gen_exception_debug();
41
- } else {
42
- tcg_gen_lookup_and_goto_ptr();
43
- }
44
-}
45
-
46
static void gen_exception_illegal(DisasContext *ctx)
47
{
48
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
49
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
50
tcg_gen_exit_tb(ctx->base.tb, n);
51
} else {
52
tcg_gen_movi_tl(cpu_pc, dest);
53
- lookup_and_goto_ptr(ctx);
54
+ tcg_gen_lookup_and_goto_ptr();
51
}
55
}
52
}
56
}
53
57
54
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
58
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
55
- const TCGArgConstraint *arg_ct)
59
index XXXXXXX..XXXXXXX 100644
56
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
60
--- a/target/riscv/insn_trans/trans_privileged.c.inc
61
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
62
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
63
64
if (has_ext(ctx, RVS)) {
65
gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
66
- exit_tb(ctx); /* no chaining */
67
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
68
ctx->base.is_jmp = DISAS_NORETURN;
69
} else {
70
return false;
71
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
72
#ifndef CONFIG_USER_ONLY
73
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
74
gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
75
- exit_tb(ctx); /* no chaining */
76
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
77
ctx->base.is_jmp = DISAS_NORETURN;
78
return true;
79
#else
80
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/riscv/insn_trans/trans_rvi.c.inc
83
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
84
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
85
if (a->rd != 0) {
86
tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
87
}
88
-
89
- /* No chaining with JALR. */
90
- lookup_and_goto_ptr(ctx);
91
+ tcg_gen_lookup_and_goto_ptr();
92
93
if (misaligned) {
94
gen_set_label(misaligned);
95
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
96
* however we need to end the translation block
97
*/
98
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
99
- exit_tb(ctx);
100
+ tcg_gen_exit_tb(NULL, 0);
101
ctx->base.is_jmp = DISAS_NORETURN;
102
return true;
103
}
104
@@ -XXX,XX +XXX,XX @@ static bool do_csr_post(DisasContext *ctx)
57
{
105
{
58
- int ct = arg_ct->ct;
106
/* We may have changed important cpu state -- exit to main loop. */
59
-
107
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
60
if (ct & TCG_CT_CONST) {
108
- exit_tb(ctx);
61
return 1;
109
+ tcg_gen_exit_tb(NULL, 0);
62
}
110
ctx->base.is_jmp = DISAS_NORETURN;
63
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
111
return true;
112
}
113
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
64
index XXXXXXX..XXXXXXX 100644
114
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/arm/tcg-target.c.inc
115
--- a/target/riscv/insn_trans/trans_rvv.c.inc
66
+++ b/tcg/arm/tcg-target.c.inc
116
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
67
@@ -XXX,XX +XXX,XX @@ static inline int check_fit_imm(uint32_t imm)
117
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
68
* mov operand2: values represented with x << (2 * y), x < 0x100
118
gen_set_gpr(ctx, a->rd, dst);
69
* add, sub, eor...: ditto
119
70
*/
120
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
71
-static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
121
- lookup_and_goto_ptr(ctx);
72
- const TCGArgConstraint *arg_ct)
122
+ tcg_gen_lookup_and_goto_ptr();
73
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
123
ctx->base.is_jmp = DISAS_NORETURN;
74
{
124
return true;
75
- int ct;
76
- ct = arg_ct->ct;
77
if (ct & TCG_CT_CONST) {
78
return 1;
79
} else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
80
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
81
index XXXXXXX..XXXXXXX 100644
82
--- a/tcg/i386/tcg-target.c.inc
83
+++ b/tcg/i386/tcg-target.c.inc
84
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
85
}
125
}
86
87
/* test if a constant matches the constraint */
88
-static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
89
- const TCGArgConstraint *arg_ct)
90
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
91
{
92
- int ct = arg_ct->ct;
93
if (ct & TCG_CT_CONST) {
94
return 1;
95
}
96
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
97
index XXXXXXX..XXXXXXX 100644
98
--- a/tcg/mips/tcg-target.c.inc
99
+++ b/tcg/mips/tcg-target.c.inc
100
@@ -XXX,XX +XXX,XX @@ static inline bool is_p2m1(tcg_target_long val)
101
}
102
103
/* test if a constant matches the constraint */
104
-static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
105
- const TCGArgConstraint *arg_ct)
106
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
107
{
108
- int ct;
109
- ct = arg_ct->ct;
110
if (ct & TCG_CT_CONST) {
111
return 1;
112
} else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
113
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
114
index XXXXXXX..XXXXXXX 100644
115
--- a/tcg/ppc/tcg-target.c.inc
116
+++ b/tcg/ppc/tcg-target.c.inc
117
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
118
}
119
120
/* test if a constant matches the constraint */
121
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
122
- const TCGArgConstraint *arg_ct)
123
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
124
{
125
- int ct = arg_ct->ct;
126
if (ct & TCG_CT_CONST) {
127
return 1;
128
}
129
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
130
index XXXXXXX..XXXXXXX 100644
131
--- a/tcg/riscv/tcg-target.c.inc
132
+++ b/tcg/riscv/tcg-target.c.inc
133
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
134
}
135
136
/* test if a constant matches the constraint */
137
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
138
- const TCGArgConstraint *arg_ct)
139
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
140
{
141
- int ct = arg_ct->ct;
142
if (ct & TCG_CT_CONST) {
143
return 1;
144
}
145
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
146
index XXXXXXX..XXXXXXX 100644
147
--- a/tcg/s390/tcg-target.c.inc
148
+++ b/tcg/s390/tcg-target.c.inc
149
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
150
}
151
152
/* Test if a constant matches the constraint. */
153
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
154
- const TCGArgConstraint *arg_ct)
155
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
156
{
157
- int ct = arg_ct->ct;
158
-
159
if (ct & TCG_CT_CONST) {
160
return 1;
161
}
162
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
163
index XXXXXXX..XXXXXXX 100644
164
--- a/tcg/sparc/tcg-target.c.inc
165
+++ b/tcg/sparc/tcg-target.c.inc
166
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
167
}
168
169
/* test if a constant matches the constraint */
170
-static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
171
- const TCGArgConstraint *arg_ct)
172
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
173
{
174
- int ct = arg_ct->ct;
175
-
176
if (ct & TCG_CT_CONST) {
177
return 1;
178
}
179
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
180
index XXXXXXX..XXXXXXX 100644
181
--- a/tcg/tci/tcg-target.c.inc
182
+++ b/tcg/tci/tcg-target.c.inc
183
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
184
}
185
186
/* Test if a constant matches the constraint. */
187
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
188
- const TCGArgConstraint *arg_ct)
189
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
190
{
191
- /* No need to return 0 or 1, 0 or != 0 is good enough. */
192
- return arg_ct->ct & TCG_CT_CONST;
193
+ return ct & TCG_CT_CONST;
194
}
195
196
static void tcg_target_init(TCGContext *s)
197
--
126
--
198
2.25.1
127
2.25.1
199
128
200
129
This consists of the three immediate shifts: shli, shri, sari.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.h | 2 +-
 tcg/arm/tcg-target.c.inc | 27 +++++++++++++++++++++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)

GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/rx/helper.h | 1 -
 target/rx/op_helper.c | 8 --------
 target/rx/translate.c | 12 ++----------
 3 files changed, 2 insertions(+), 19 deletions(-)
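The shift amounts passed to tcg_out_vshifti in the hunks below look odd
at first glance; the reason is that NEON packs the element size and the
shift count into a single imm6 field. A hedged illustration of that
packing:

    /* Illustrative only: imm6 packing for NEON immediate shifts.
     * VSHL.I encodes (8 << vece) + shift; VSHR encodes (16 << vece) - shift,
     * matching the a2 arithmetic in the hunks below. */
    static int imm6_shl(unsigned vece, unsigned shift)
    {
        return (8 << vece) + shift;    /* vece=0, shift=3 -> 0b001011 */
    }

    static int imm6_shr(unsigned vece, unsigned shift)
    {
        return (16 << vece) - shift;   /* right shifts count down */
    }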
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
11
diff --git a/target/rx/helper.h b/target/rx/helper.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.h
13
--- a/target/rx/helper.h
13
+++ b/tcg/arm/tcg-target.h
14
+++ b/target/rx/helper.h
14
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
15
#define TCG_TARGET_HAS_roti_vec 0
16
DEF_HELPER_1(raise_access_fault, noreturn, env)
16
#define TCG_TARGET_HAS_rots_vec 0
17
DEF_HELPER_1(raise_privilege_violation, noreturn, env)
17
#define TCG_TARGET_HAS_rotv_vec 0
18
DEF_HELPER_1(wait, noreturn, env)
18
-#define TCG_TARGET_HAS_shi_vec 0
19
-DEF_HELPER_1(debug, noreturn, env)
19
+#define TCG_TARGET_HAS_shi_vec 1
20
DEF_HELPER_2(rxint, noreturn, env, i32)
20
#define TCG_TARGET_HAS_shs_vec 0
21
DEF_HELPER_1(rxbrk, noreturn, env)
21
#define TCG_TARGET_HAS_shv_vec 0
22
DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
22
#define TCG_TARGET_HAS_mul_vec 0
23
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
23
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
24
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/arm/tcg-target.c.inc
25
--- a/target/rx/op_helper.c
26
+++ b/tcg/arm/tcg-target.c.inc
26
+++ b/target/rx/op_helper.c
27
@@ -XXX,XX +XXX,XX @@ typedef enum {
27
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_wait(CPURXState *env)
28
INSN_VCGE_U = 0xf3000310,
28
raise_exception(env, EXCP_HLT, 0);
29
INSN_VCGT_U = 0xf3000300,
30
31
+ INSN_VSHLI = 0xf2800510, /* VSHL (immediate) */
32
+ INSN_VSARI = 0xf2800010, /* VSHR.S */
33
+ INSN_VSHRI = 0xf3800010, /* VSHR.U */
34
+
35
INSN_VTST = 0xf2000810,
36
37
INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
39
| (extract32(imm8, 7, 1) << 24));
40
}
29
}
41
30
42
+static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
31
-void QEMU_NORETURN helper_debug(CPURXState *env)
43
+ TCGReg rd, TCGReg rm, int l_imm6)
32
-{
44
+{
33
- CPUState *cs = env_cpu(env);
45
+ tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
34
-
46
+ (extract32(l_imm6, 6, 1) << 7) |
35
- cs->exception_index = EXCP_DEBUG;
47
+ (extract32(l_imm6, 0, 6) << 16));
36
- cpu_loop_exit(cs);
48
+}
37
-}
49
+
38
-
50
static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
39
void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec)
51
TCGReg rd, TCGReg rn, int offset)
52
{
40
{
53
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
41
raise_exception(env, 0x100 + vec, 0);
54
case INDEX_op_abs_vec:
42
diff --git a/target/rx/translate.c b/target/rx/translate.c
55
case INDEX_op_neg_vec:
43
index XXXXXXX..XXXXXXX 100644
56
case INDEX_op_not_vec:
44
--- a/target/rx/translate.c
57
+ case INDEX_op_shli_vec:
45
+++ b/target/rx/translate.c
58
+ case INDEX_op_shri_vec:
46
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
59
+ case INDEX_op_sari_vec:
47
tcg_gen_exit_tb(dc->base.tb, n);
60
return C_O1_I1(w, w);
48
} else {
61
case INDEX_op_dup2_vec:
49
tcg_gen_movi_i32(cpu_pc, dest);
62
case INDEX_op_add_vec:
50
- if (dc->base.singlestep_enabled) {
63
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
51
- gen_helper_debug(cpu_env);
64
case INDEX_op_xor_vec:
52
- } else {
65
tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
53
- tcg_gen_lookup_and_goto_ptr();
66
return;
54
- }
67
+ case INDEX_op_shli_vec:
55
+ tcg_gen_lookup_and_goto_ptr();
68
+ tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
56
}
69
+ return;
57
dc->base.is_jmp = DISAS_NORETURN;
70
+ case INDEX_op_shri_vec:
58
}
71
+ tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
59
@@ -XXX,XX +XXX,XX @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
72
+ return;
60
gen_goto_tb(ctx, 0, dcbase->pc_next);
73
+ case INDEX_op_sari_vec:
61
break;
74
+ tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
62
case DISAS_JUMP:
75
+ return;
63
- if (ctx->base.singlestep_enabled) {
76
64
- gen_helper_debug(cpu_env);
77
case INDEX_op_andc_vec:
65
- } else {
78
if (!const_args[2]) {
66
- tcg_gen_lookup_and_goto_ptr();
79
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
67
- }
80
case INDEX_op_orc_vec:
68
+ tcg_gen_lookup_and_goto_ptr();
81
case INDEX_op_xor_vec:
69
break;
82
case INDEX_op_not_vec:
70
case DISAS_UPDATE:
83
+ case INDEX_op_shli_vec:
71
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
84
+ case INDEX_op_shri_vec:
85
+ case INDEX_op_sari_vec:
86
return 1;
87
case INDEX_op_abs_vec:
88
case INDEX_op_cmp_vec:
89
--
72
--
90
2.25.1
73
2.25.1
91
74
92
75
Most of dupi is copied from tcg/aarch64, which has the same
encoding for AdvSimdExpandImm.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 283 +++++++++++++++++++++++++++++++++--
 1 file changed, 275 insertions(+), 8 deletions(-)

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/tcg/translate.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
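The AdvSimdExpandImm checks boil down to asking whether the constant is
a single byte placed at one shift position. A sketch of the 16-bit case,
mirroring the shape of the is_shimm16 helper added below (illustrative
only):

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative only: can v16 be encoded as imm8 shifted by 0 or 8? */
    static bool shimm16(uint16_t v16, int *cmode, int *imm8)
    {
        if (v16 == (v16 & 0xff)) {      /* byte in bits [7:0] */
            *cmode = 0x8;
            *imm8 = v16;
            return true;
        }
        if (v16 == (v16 & 0xff00)) {    /* byte in bits [15:8] */
            *cmode = 0xa;
            *imm8 = v16 >> 8;
            return true;
        }
        return false;                   /* needs two insns or the pool */
    }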
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
8
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
11
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.c.inc
10
--- a/target/s390x/tcg/translate.c
13
+++ b/tcg/arm/tcg-target.c.inc
11
+++ b/target/s390x/tcg/translate.c
14
@@ -XXX,XX +XXX,XX @@ typedef enum {
12
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
15
13
uint64_t pc_tmp;
16
INSN_VORR = 0xf2200110,
14
uint32_t ilen;
17
15
enum cc_op cc_op;
18
+ INSN_VDUP_G = 0xee800b10, /* VDUP (ARM core register) */
16
- bool do_debug;
19
+ INSN_VDUP_S = 0xf3b00c00, /* VDUP (scalar) */
20
+ INSN_VLDR_D = 0xed100b00, /* VLDR.64 */
21
INSN_VLD1 = 0xf4200000, /* VLD1 (multiple single elements) */
22
+ INSN_VLD1R = 0xf4a00c00, /* VLD1 (single element to all lanes) */
23
INSN_VST1 = 0xf4000000, /* VST1 (multiple single elements) */
24
+ INSN_VMOVI = 0xf2800010, /* VMOV (immediate) */
25
} ARMInsn;
26
27
#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
28
@@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_arm_cond[] = {
29
[TCG_COND_GTU] = COND_HI,
30
};
17
};
31
18
32
+static int encode_imm(uint32_t imm);
19
/* Information carried about a condition to be evaluated. */
33
+
20
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
34
+/* TCG private relocation type: add with pc+imm8 */
21
35
+#define R_ARM_PC8 11
22
dc->cc_op = CC_OP_DYNAMIC;
36
+
23
dc->ex_value = dc->base.tb->cs_base;
37
+/* TCG private relocation type: vldr with imm8 << 2 */
24
- dc->do_debug = dc->base.singlestep_enabled;
38
+#define R_ARM_PC11 12
39
+
40
static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
41
{
42
const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
43
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
44
return false;
45
}
25
}
46
26
47
+static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
27
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
48
+{
28
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
49
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
29
/* FALLTHRU */
50
+ ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;
30
case DISAS_PC_CC_UPDATED:
51
+
31
/* Exit the TB, either by raising a debug exception or by return. */
52
+ if (offset >= -0xff && offset <= 0xff) {
32
- if (dc->do_debug) {
53
+ tcg_insn_unit insn = *src_rw;
33
- gen_exception(EXCP_DEBUG);
54
+ bool u = (offset >= 0);
34
- } else if ((dc->base.tb->flags & FLAG_MASK_PER) ||
55
+ if (!u) {
35
- dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
56
+ offset = -offset;
36
+ if ((dc->base.tb->flags & FLAG_MASK_PER) ||
57
+ }
37
+ dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
58
+ insn = deposit32(insn, 23, 1, u);
38
tcg_gen_exit_tb(NULL, 0);
59
+ insn = deposit32(insn, 0, 8, offset);
39
} else {
60
+ *src_rw = insn;
40
tcg_gen_lookup_and_goto_ptr();
61
+ return true;
62
+ }
63
+ return false;
64
+}
65
+
66
+static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
67
+{
68
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
69
+ ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
70
+ int rot = encode_imm(offset);
71
+
72
+ if (rot >= 0) {
73
+ *src_rw = deposit32(*src_rw, 0, 12, rol32(offset, rot) | (rot << 7));
74
+ return true;
75
+ }
76
+ return false;
77
+}
78
+
79
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
80
intptr_t value, intptr_t addend)
81
{
82
tcg_debug_assert(addend == 0);
83
-
84
- if (type == R_ARM_PC24) {
85
+ switch (type) {
86
+ case R_ARM_PC24:
87
return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
88
- } else if (type == R_ARM_PC13) {
89
+ case R_ARM_PC13:
90
return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
91
- } else {
92
+ case R_ARM_PC11:
93
+ return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
94
+ case R_ARM_PC8:
95
+ return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
96
+ default:
97
g_assert_not_reached();
98
}
99
}
100
@@ -XXX,XX +XXX,XX @@ static inline uint32_t rotl(uint32_t val, int n)
101
102
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
103
right-rotated by an even amount between 0 and 30. */
104
-static inline int encode_imm(uint32_t imm)
105
+static int encode_imm(uint32_t imm)
106
{
107
int shift;
108
109
@@ -XXX,XX +XXX,XX @@ static inline int check_fit_imm(uint32_t imm)
110
return encode_imm(imm) >= 0;
111
}
112
113
+/* Return true if v16 is a valid 16-bit shifted immediate. */
114
+static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
115
+{
116
+ if (v16 == (v16 & 0xff)) {
117
+ *cmode = 0x8;
118
+ *imm8 = v16 & 0xff;
119
+ return true;
120
+ } else if (v16 == (v16 & 0xff00)) {
121
+ *cmode = 0xa;
122
+ *imm8 = v16 >> 8;
123
+ return true;
124
+ }
125
+ return false;
126
+}
127
+
128
+/* Return true if v32 is a valid 32-bit shifted immediate. */
129
+static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
130
+{
131
+ if (v32 == (v32 & 0xff)) {
132
+ *cmode = 0x0;
133
+ *imm8 = v32 & 0xff;
134
+ return true;
135
+ } else if (v32 == (v32 & 0xff00)) {
136
+ *cmode = 0x2;
137
+ *imm8 = (v32 >> 8) & 0xff;
138
+ return true;
139
+ } else if (v32 == (v32 & 0xff0000)) {
140
+ *cmode = 0x4;
141
+ *imm8 = (v32 >> 16) & 0xff;
142
+ return true;
143
+ } else if (v32 == (v32 & 0xff000000)) {
144
+ *cmode = 0x6;
145
+ *imm8 = v32 >> 24;
146
+ return true;
147
+ }
148
+ return false;
149
+}
150
+
151
+/* Return true if v32 is a valid 32-bit shifting ones immediate. */
152
+static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
153
+{
154
+ if ((v32 & 0xffff00ff) == 0xff) {
155
+ *cmode = 0xc;
156
+ *imm8 = (v32 >> 8) & 0xff;
157
+ return true;
158
+ } else if ((v32 & 0xff00ffff) == 0xffff) {
159
+ *cmode = 0xd;
160
+ *imm8 = (v32 >> 16) & 0xff;
161
+ return true;
162
+ }
163
+ return false;
164
+}
165
+
166
+/*
167
+ * Return non-zero if v32 can be formed by MOVI+ORR.
168
+ * Place the parameters for MOVI in (cmode, imm8).
169
+ * Return the cmode for ORR; the imm8 can be had via extraction from v32.
170
+ */
171
+static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
172
+{
173
+ int i;
174
+
175
+ for (i = 6; i > 0; i -= 2) {
176
+ /* Mask out one byte we can add with ORR. */
177
+ uint32_t tmp = v32 & ~(0xffu << (i * 4));
178
+ if (is_shimm32(tmp, cmode, imm8) ||
179
+ is_soimm32(tmp, cmode, imm8)) {
180
+ break;
181
+ }
182
+ }
183
+ return i;
184
+}
185
+
186
/* Test if a constant matches the constraint.
187
* TODO: define constraints for:
188
*
189
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
190
encode_vd(d) | encode_vn(n) | encode_vm(m));
191
}
192
193
+static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
194
+ int q, int op, int cmode, uint8_t imm8)
195
+{
196
+ tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
197
+ | (cmode << 8) | extract32(imm8, 0, 4)
198
+ | (extract32(imm8, 4, 3) << 16)
199
+ | (extract32(imm8, 7, 1) << 24));
200
+}
201
+
202
static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
203
TCGReg rd, TCGReg rn, int offset)
204
{
205
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
206
tcg_out_movi32(s, COND_AL, ret, arg);
207
}
208
209
+/* Type is always V128, with I64 elements. */
210
+static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
211
+{
212
+ /* Move high element into place first. */
213
+ /* VMOV Dd+1, Ds */
214
+ tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
215
+ /* Move low element into place; tcg_out_mov will check for nop. */
216
+ tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
217
+}
218
+
219
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
220
TCGReg rd, TCGReg rs)
221
{
222
- g_assert_not_reached();
223
+ int q = type - TCG_TYPE_V64;
224
+
225
+ if (vece == MO_64) {
226
+ if (type == TCG_TYPE_V128) {
227
+ tcg_out_dup2_vec(s, rd, rs, rs);
228
+ } else {
229
+ tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
230
+ }
231
+ } else if (rs < TCG_REG_Q0) {
232
+ int b = (vece == MO_8);
233
+ int e = (vece == MO_16);
234
+ tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
235
+ encode_vn(rd) | (rs << 12));
236
+ } else {
237
+ int imm4 = 1 << vece;
238
+ tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
239
+ encode_vd(rd) | encode_vm(rs));
240
+ }
241
+ return true;
242
}
243
244
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
245
TCGReg rd, TCGReg base, intptr_t offset)
246
{
247
- g_assert_not_reached();
248
+ if (vece == MO_64) {
249
+ tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
250
+ if (type == TCG_TYPE_V128) {
251
+ tcg_out_dup2_vec(s, rd, rd, rd);
252
+ }
253
+ } else {
254
+ int q = type - TCG_TYPE_V64;
255
+ tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
256
+ rd, base, offset);
257
+ }
258
+ return true;
259
}
260
261
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
262
TCGReg rd, int64_t v64)
263
{
264
- g_assert_not_reached();
265
+ int q = type - TCG_TYPE_V64;
266
+ int cmode, imm8, i;
267
+
268
+ /* Test all bytes equal first. */
269
+ if (vece == MO_8) {
270
+ tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
271
+ return;
272
+ }
273
+
274
+ /*
275
+ * Test all bytes 0x00 or 0xff second. This can match cases that
276
+ * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
277
+ */
278
+ for (i = imm8 = 0; i < 8; i++) {
279
+ uint8_t byte = v64 >> (i * 8);
280
+ if (byte == 0xff) {
281
+ imm8 |= 1 << i;
282
+ } else if (byte != 0) {
283
+ goto fail_bytes;
284
+ }
285
+ }
286
+ tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
287
+ return;
288
+ fail_bytes:
289
+
290
+ /*
291
+ * Tests for various replications. For each element width, if we
292
+ * cannot find an expansion there's no point checking a larger
293
+ * width because we already know by replication it cannot match.
294
+ */
295
+ if (vece == MO_16) {
296
+ uint16_t v16 = v64;
297
+
298
+ if (is_shimm16(v16, &cmode, &imm8)) {
299
+ tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
300
+ return;
301
+ }
302
+ if (is_shimm16(~v16, &cmode, &imm8)) {
303
+ tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
304
+ return;
305
+ }
306
+
307
+ /*
308
+ * Otherwise, all remaining constants can be loaded in two insns:
309
+ * rd = v16 & 0xff, rd |= v16 & 0xff00.
310
+ */
311
+ tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
312
+ tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */
313
+ return;
314
+ }
315
+
316
+ if (vece == MO_32) {
317
+ uint32_t v32 = v64;
318
+
319
+ if (is_shimm32(v32, &cmode, &imm8) ||
320
+ is_soimm32(v32, &cmode, &imm8)) {
321
+ tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
322
+ return;
323
+ }
324
+ if (is_shimm32(~v32, &cmode, &imm8) ||
325
+ is_soimm32(~v32, &cmode, &imm8)) {
326
+ tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
327
+ return;
328
+ }
329
+
330
+ /*
331
+ * Restrict the set of constants to those we can load with
332
+ * two instructions. Others we load from the pool.
333
+ */
334
+ i = is_shimm32_pair(v32, &cmode, &imm8);
335
+ if (i) {
336
+ tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
337
+ tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
338
+ return;
339
+ }
340
+ i = is_shimm32_pair(~v32, &cmode, &imm8);
341
+ if (i) {
342
+ tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
343
+ tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
344
+ return;
345
+ }
346
+ }
347
+
348
+ /*
349
+ * As a last resort, load from the constant pool.
350
+ */
351
+ if (!q || vece == MO_64) {
352
+ new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
353
+ /* VLDR Dd, [pc + offset] */
354
+ tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
355
+ if (q) {
356
+ tcg_out_dup2_vec(s, rd, rd, rd);
357
+ }
358
+ } else {
359
+ new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
360
+ /* add tmp, pc, offset */
361
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
362
+ tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
363
+ }
364
}
365
366
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
367
--
41
--
368
2.25.1
42
2.25.1
369
43
370
44
These logical and arithmetic operations are optional, but are
trivial to accomplish with the existing infrastructure.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target-con-set.h | 1 +
 tcg/arm/tcg-target.h | 10 +++++-----
 tcg/arm/tcg-target.c.inc | 38 ++++++++++++++++++++++++++++++++++++
 3 files changed, 44 insertions(+), 5 deletions(-)

GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sh4/helper.h | 1 -
 target/sh4/op_helper.c | 5 -----
 target/sh4/translate.c | 14 +++-----------
 3 files changed, 3 insertions(+), 17 deletions(-)
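The constant-operand paths below lean on a simple identity: and-complement
with constant c is the same as and with ~c, and or-complement with c is or
with ~c, so the code complements a2 once and falls through to the existing
immediate path. A scalar sketch, illustrative only:

    #include <stdint.h>

    /* Illustrative only: why "a2 = ~a2; fall through" is correct. */
    static uint32_t andc(uint32_t x, uint32_t c)
    {
        return x & ~c;   /* VBIC with a register operand... */
    }

    static uint32_t andc_const(uint32_t x, uint32_t c)
    {
        c = ~c;          /* ...or complement the constant once */
        return x & c;    /* and reuse the VAND immediate path */
    }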
diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
11
diff --git a/target/sh4/helper.h b/target/sh4/helper.h
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/arm/tcg-target-con-set.h
13
--- a/target/sh4/helper.h
15
+++ b/tcg/arm/tcg-target-con-set.h
14
+++ b/target/sh4/helper.h
16
@@ -XXX,XX +XXX,XX @@ C_O0_I4(s, s, s, s)
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
17
C_O1_I1(r, l)
16
DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env)
18
C_O1_I1(r, r)
17
DEF_HELPER_1(raise_fpu_disable, noreturn, env)
19
C_O1_I1(w, r)
18
DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env)
20
+C_O1_I1(w, w)
19
-DEF_HELPER_1(debug, noreturn, env)
21
C_O1_I1(w, wr)
20
DEF_HELPER_1(sleep, noreturn, env)
22
C_O1_I2(r, 0, rZ)
21
DEF_HELPER_2(trapa, noreturn, env, i32)
23
C_O1_I2(r, l, l)
22
DEF_HELPER_1(exclusive, noreturn, env)
24
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
23
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
25
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
26
--- a/tcg/arm/tcg-target.h
25
--- a/target/sh4/op_helper.c
27
+++ b/tcg/arm/tcg-target.h
26
+++ b/target/sh4/op_helper.c
28
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
27
@@ -XXX,XX +XXX,XX @@ void helper_raise_slot_fpu_disable(CPUSH4State *env)
29
#define TCG_TARGET_HAS_v128 use_neon_instructions
28
raise_exception(env, 0x820, 0);
30
#define TCG_TARGET_HAS_v256 0
29
}
31
30
32
-#define TCG_TARGET_HAS_andc_vec 0
31
-void helper_debug(CPUSH4State *env)
33
-#define TCG_TARGET_HAS_orc_vec 0
32
-{
34
-#define TCG_TARGET_HAS_not_vec 0
33
- raise_exception(env, EXCP_DEBUG, 0);
35
-#define TCG_TARGET_HAS_neg_vec 0
34
-}
36
-#define TCG_TARGET_HAS_abs_vec 0
35
-
37
+#define TCG_TARGET_HAS_andc_vec 1
36
void helper_sleep(CPUSH4State *env)
38
+#define TCG_TARGET_HAS_orc_vec 1
37
{
39
+#define TCG_TARGET_HAS_not_vec 1
38
CPUState *cs = env_cpu(env);
40
+#define TCG_TARGET_HAS_neg_vec 1
39
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
41
+#define TCG_TARGET_HAS_abs_vec 1
42
#define TCG_TARGET_HAS_roti_vec 0
43
#define TCG_TARGET_HAS_rots_vec 0
44
#define TCG_TARGET_HAS_rotv_vec 0
45
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
46
index XXXXXXX..XXXXXXX 100644
40
index XXXXXXX..XXXXXXX 100644
47
--- a/tcg/arm/tcg-target.c.inc
41
--- a/target/sh4/translate.c
48
+++ b/tcg/arm/tcg-target.c.inc
42
+++ b/target/sh4/translate.c
49
@@ -XXX,XX +XXX,XX @@ typedef enum {
43
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
50
44
tcg_gen_exit_tb(ctx->base.tb, n);
51
INSN_VADD = 0xf2000800,
45
} else {
52
INSN_VAND = 0xf2000110,
46
tcg_gen_movi_i32(cpu_pc, dest);
53
+ INSN_VBIC = 0xf2100110,
47
- if (ctx->base.singlestep_enabled) {
54
INSN_VEOR = 0xf3000110,
48
- gen_helper_debug(cpu_env);
55
+ INSN_VORN = 0xf2300110,
49
- } else if (use_exit_tb(ctx)) {
56
INSN_VORR = 0xf2200110,
50
+ if (use_exit_tb(ctx)) {
57
INSN_VSUB = 0xf3000800,
51
tcg_gen_exit_tb(NULL, 0);
58
52
} else {
59
+ INSN_VABS = 0xf3b10300,
53
tcg_gen_lookup_and_goto_ptr();
60
INSN_VMVN = 0xf3b00580,
54
@@ -XXX,XX +XXX,XX @@ static void gen_jump(DisasContext * ctx)
61
+ INSN_VNEG = 0xf3b10380,
55
     delayed jump as immediate jump are conditinal jumps */
62
56
    tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
63
INSN_VCEQ0 = 0xf3b10100,
57
tcg_gen_discard_i32(cpu_delayed_pc);
64
INSN_VCGT0 = 0xf3b10000,
58
- if (ctx->base.singlestep_enabled) {
65
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
59
- gen_helper_debug(cpu_env);
66
return C_O1_I1(w, r);
60
- } else if (use_exit_tb(ctx)) {
67
case INDEX_op_dup_vec:
61
+ if (use_exit_tb(ctx)) {
68
return C_O1_I1(w, wr);
62
tcg_gen_exit_tb(NULL, 0);
69
+ case INDEX_op_abs_vec:
63
} else {
70
+ case INDEX_op_neg_vec:
64
tcg_gen_lookup_and_goto_ptr();
71
+ case INDEX_op_not_vec:
65
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
72
+ return C_O1_I1(w, w);
66
switch (ctx->base.is_jmp) {
73
case INDEX_op_dup2_vec:
67
case DISAS_STOP:
74
case INDEX_op_add_vec:
68
gen_save_cpu_state(ctx, true);
75
case INDEX_op_sub_vec:
69
- if (ctx->base.singlestep_enabled) {
76
case INDEX_op_xor_vec:
70
- gen_helper_debug(cpu_env);
77
return C_O1_I2(w, w, w);
71
- } else {
78
case INDEX_op_or_vec:
72
- tcg_gen_exit_tb(NULL, 0);
79
+ case INDEX_op_andc_vec:
73
- }
80
return C_O1_I2(w, w, wO);
74
+ tcg_gen_exit_tb(NULL, 0);
81
case INDEX_op_and_vec:
75
break;
82
+ case INDEX_op_orc_vec:
76
case DISAS_NEXT:
83
return C_O1_I2(w, w, wV);
77
case DISAS_TOO_MANY:
84
case INDEX_op_cmp_vec:
85
return C_O1_I2(w, w, wZ);
86
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
87
case INDEX_op_dup2_vec:
88
tcg_out_dup2_vec(s, a0, a1, a2);
89
return;
90
+ case INDEX_op_abs_vec:
91
+ tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
92
+ return;
93
+ case INDEX_op_neg_vec:
94
+ tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
95
+ return;
96
+ case INDEX_op_not_vec:
97
+ tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
98
+ return;
99
case INDEX_op_add_vec:
100
tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
101
return;
102
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
103
tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
104
return;
105
106
+ case INDEX_op_andc_vec:
107
+ if (!const_args[2]) {
108
+ tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
109
+ return;
110
+ }
111
+ a2 = ~a2;
112
+ /* fall through */
113
case INDEX_op_and_vec:
114
if (const_args[2]) {
115
is_shimm1632(~a2, &cmode, &imm8);
116
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
117
tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
118
return;
119
120
+ case INDEX_op_orc_vec:
121
+ if (!const_args[2]) {
122
+ tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
123
+ return;
124
+ }
125
+ a2 = ~a2;
126
+ /* fall through */
127
case INDEX_op_or_vec:
128
if (const_args[2]) {
129
is_shimm1632(a2, &cmode, &imm8);
130
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
131
case INDEX_op_add_vec:
132
case INDEX_op_sub_vec:
133
case INDEX_op_and_vec:
134
+ case INDEX_op_andc_vec:
135
case INDEX_op_or_vec:
136
+ case INDEX_op_orc_vec:
137
case INDEX_op_xor_vec:
138
+ case INDEX_op_not_vec:
139
return 1;
140
+ case INDEX_op_abs_vec:
141
case INDEX_op_cmp_vec:
142
+ case INDEX_op_neg_vec:
143
return vece < MO_64;
144
default:
145
return 0;
146
--
78
--
147
2.25.1
79
2.25.1
148
80
149
81
diff view generated by jsdifflib
1
Implementing dup2, add, sub, and, or, xor as the minimal set.
1
GDB single-stepping is now handled generically.
2
This allows us to actually enable neon in the header file.
3
2
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
 target/tricore/helper.h    |  1 -
 target/tricore/op_helper.c |  7 -------
 target/tricore/translate.c | 14 +-------------
 3 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/target/tricore/helper.h b/target/tricore/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/helper.h
+++ b/target/tricore/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(psw_write, void, env, i32)
 DEF_HELPER_1(psw_read, i32, env)
 /* Exceptions */
 DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32)
-DEF_HELPER_2(qemu_excp, noreturn, env, i32)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -XXX,XX +XXX,XX @@ static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class,
     raise_exception_sync_internal(env, class, tin, pc, 0);
 }
 
-void helper_qemu_excp(CPUTriCoreState *env, uint32_t excp)
-{
-    CPUState *cs = env_cpu(env);
-    cs->exception_index = excp;
-    cpu_loop_exit(cs);
-}
-
 /* Addressing mode helper */
 
 static uint16_t reverse16(uint16_t val)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_pc(target_ulong pc)
     tcg_gen_movi_tl(cpu_PC, pc);
 }
 
-static void generate_qemu_excp(DisasContext *ctx, int excp)
-{
-    TCGv_i32 tmp = tcg_const_i32(excp);
-    gen_helper_qemu_excp(cpu_env, tmp);
-    ctx->base.is_jmp = DISAS_NORETURN;
-    tcg_temp_free(tmp);
-}
-
 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
 {
     if (translator_use_goto_tb(&ctx->base, dest)) {
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         gen_save_pc(dest);
-        if (ctx->base.singlestep_enabled) {
-            generate_qemu_excp(ctx, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
--
2.25.1


Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target-con-set.h |   3 +
 tcg/arm/tcg-target-con-str.h |   2 +
 tcg/arm/tcg-target.h         |   6 +-
 tcg/arm/tcg-target.c.inc     | 201 +++++++++++++++++++++++++++++++++--
 4 files changed, 204 insertions(+), 8 deletions(-)

diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-con-set.h
+++ b/tcg/arm/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, r, rIN)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, rZ, rZ)
 C_O1_I2(w, w, w)
+C_O1_I2(w, w, wO)
+C_O1_I2(w, w, wV)
+C_O1_I2(w, w, wZ)
 C_O1_I4(r, r, r, rI, rI)
 C_O1_I4(r, r, rIN, rIK, 0)
 C_O2_I1(r, r, l)
diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-con-str.h
+++ b/tcg/arm/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ REGS('w', ALL_VECTOR_REGS)
 CONST('I', TCG_CT_CONST_ARM)
 CONST('K', TCG_CT_CONST_INV)
 CONST('N', TCG_CT_CONST_NEG)
+CONST('O', TCG_CT_CONST_ORRI)
+CONST('V', TCG_CT_CONST_ANDI)
 CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #else
 extern bool use_idiv_instructions;
 #endif
-#define use_neon_instructions 0
+#ifdef __ARM_NEON__
+#define use_neon_instructions 1
+#else
+extern bool use_neon_instructions;
+#endif
 
 /* used for function call generation */
 #define TCG_TARGET_STACK_ALIGN        8
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ int arm_arch = __ARM_ARCH;
 #ifndef use_idiv_instructions
 bool use_idiv_instructions;
 #endif
+#ifndef use_neon_instructions
+bool use_neon_instructions;
+#endif
 
 /* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */
 #ifdef CONFIG_SOFTMMU
@@ -XXX,XX +XXX,XX @@ typedef enum {
     /* Otherwise the assembler uses mov r0,r0 */
     INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
 
+    INSN_VADD      = 0xf2000800,
+    INSN_VAND      = 0xf2000110,
+    INSN_VEOR      = 0xf3000110,
     INSN_VORR      = 0xf2200110,
+    INSN_VSUB      = 0xf3000800,
+
+    INSN_VMVN      = 0xf3b00580,
+
+    INSN_VCEQ0     = 0xf3b10100,
+    INSN_VCGT0     = 0xf3b10000,
+    INSN_VCGE0     = 0xf3b10080,
+    INSN_VCLE0     = 0xf3b10180,
+    INSN_VCLT0     = 0xf3b10200,
+
+    INSN_VCEQ      = 0xf3000810,
+    INSN_VCGE      = 0xf2000310,
+    INSN_VCGT      = 0xf2000300,
+    INSN_VCGE_U    = 0xf3000310,
+    INSN_VCGT_U    = 0xf3000300,
+
+    INSN_VTST      = 0xf2000810,
 
     INSN_VDUP_G    = 0xee800b10, /* VDUP (ARM core register) */
     INSN_VDUP_S    = 0xf3b00c00, /* VDUP (scalar) */
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
 #define TCG_CT_CONST_INV  0x200
 #define TCG_CT_CONST_NEG  0x400
 #define TCG_CT_CONST_ZERO 0x800
+#define TCG_CT_CONST_ORRI 0x1000
+#define TCG_CT_CONST_ANDI 0x2000
 
 #define ALL_GENERAL_REGS  0xffffu
 #define ALL_VECTOR_REGS   0xffff0000u
@@ -XXX,XX +XXX,XX @@ static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
     return i;
 }
 
+/* Return true if V is a valid 16-bit or 32-bit shifted immediate. */
+static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
+{
+    if (v32 == deposit32(v32, 16, 16, v32)) {
+        return is_shimm16(v32, cmode, imm8);
+    } else {
+        return is_shimm32(v32, cmode, imm8);
+    }
+}
+
 /* Test if a constant matches the constraint.
  * TODO: define constraints for:
  *
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
         return 1;
     } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
         return 1;
-    } else {
-        return 0;
     }
+
+    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
+    case 0:
+        break;
+    case TCG_CT_CONST_ANDI:
+        val = ~val;
+        /* fallthru */
+    case TCG_CT_CONST_ORRI:
+        if (val == deposit64(val, 32, 32, val)) {
+            int cmode, imm8;
+            return is_shimm1632(val, &cmode, &imm8);
+        }
+        break;
+    default:
+        /* Both bits should not be set for the same insn. */
+        g_assert_not_reached();
+    }
+
+    return 0;
 }
 
 static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
@@ -XXX,XX +XXX,XX @@ static uint32_t encode_vm(TCGReg rm)
     return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
 }
 
+static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
+                          TCGReg d, TCGReg m)
+{
+    tcg_out32(s, insn | (vece << 18) | (q << 6) |
+              encode_vd(d) | encode_vm(m));
+}
+
 static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                           TCGReg d, TCGReg n, TCGReg m)
 {
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
     case INDEX_op_xor_vec:
-    case INDEX_op_or_vec:
-    case INDEX_op_and_vec:
-    case INDEX_op_cmp_vec:
         return C_O1_I2(w, w, w);
+    case INDEX_op_or_vec:
+        return C_O1_I2(w, w, wO);
+    case INDEX_op_and_vec:
+        return C_O1_I2(w, w, wV);
+    case INDEX_op_cmp_vec:
+        return C_O1_I2(w, w, wZ);
 
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
     }
 }
 
+static const ARMInsn vec_cmp_insn[16] = {
+    [TCG_COND_EQ]  = INSN_VCEQ,
+    [TCG_COND_GT]  = INSN_VCGT,
+    [TCG_COND_GE]  = INSN_VCGE,
+    [TCG_COND_GTU] = INSN_VCGT_U,
+    [TCG_COND_GEU] = INSN_VCGE_U,
+};
+
+static const ARMInsn vec_cmp0_insn[16] = {
+    [TCG_COND_EQ] = INSN_VCEQ0,
+    [TCG_COND_GT] = INSN_VCGT0,
+    [TCG_COND_GE] = INSN_VCGE0,
+    [TCG_COND_LT] = INSN_VCLT0,
+    [TCG_COND_LE] = INSN_VCLE0,
+};
+
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            unsigned vecl, unsigned vece,
                            const TCGArg *args, const int *const_args)
 {
-    g_assert_not_reached();
+    TCGType type = vecl + TCG_TYPE_V64;
+    unsigned q = vecl;
+    TCGArg a0, a1, a2;
+    int cmode, imm8;
+
+    a0 = args[0];
+    a1 = args[1];
+    a2 = args[2];
+
+    switch (opc) {
+    case INDEX_op_ld_vec:
+        tcg_out_ld(s, type, a0, a1, a2);
+        return;
+    case INDEX_op_st_vec:
+        tcg_out_st(s, type, a0, a1, a2);
+        return;
+    case INDEX_op_dupm_vec:
+        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
+        return;
+    case INDEX_op_dup2_vec:
+        tcg_out_dup2_vec(s, a0, a1, a2);
+        return;
+    case INDEX_op_add_vec:
+        tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
+        return;
+    case INDEX_op_sub_vec:
+        tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
+        return;
+    case INDEX_op_xor_vec:
+        tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
+        return;
+
+    case INDEX_op_and_vec:
+        if (const_args[2]) {
+            is_shimm1632(~a2, &cmode, &imm8);
+            if (a0 == a1) {
+                tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
+                return;
+            }
+            tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
+            a2 = a0;
+        }
+        tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
+        return;
+
+    case INDEX_op_or_vec:
+        if (const_args[2]) {
+            is_shimm1632(a2, &cmode, &imm8);
+            if (a0 == a1) {
+                tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
+                return;
+            }
+            tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
+            a2 = a0;
+        }
+        tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
+        return;
+
+    case INDEX_op_cmp_vec:
+        {
+            TCGCond cond = args[3];
+
+            if (cond == TCG_COND_NE) {
+                if (const_args[2]) {
+                    tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
+                } else {
+                    tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
+                    tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
+                }
+            } else {
+                ARMInsn insn;
+
+                if (const_args[2]) {
+                    insn = vec_cmp0_insn[cond];
+                    if (insn) {
+                        tcg_out_vreg2(s, insn, q, vece, a0, a1);
+                        return;
+                    }
+                    tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
+                    a2 = TCG_VEC_TMP;
+                }
+                insn = vec_cmp_insn[cond];
+                if (insn == 0) {
+                    TCGArg t;
+                    t = a1, a1 = a2, a2 = t;
+                    cond = tcg_swap_cond(cond);
+                    insn = vec_cmp_insn[cond];
+                    tcg_debug_assert(insn != 0);
+                }
+                tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
+            }
+        }
+        return;
+
+    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
+    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
+    default:
+        g_assert_not_reached();
+    }
 }
 
 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
 {
-    return 0;
+    switch (opc) {
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
+    case INDEX_op_and_vec:
+    case INDEX_op_or_vec:
+    case INDEX_op_xor_vec:
+        return 1;
+    case INDEX_op_cmp_vec:
+        return vece < MO_64;
+    default:
+        return 0;
+    }
 }
 
 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
--
2.25.1
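As a worked illustration of the new 'O' and 'V' constraints (my own example, not part of the patch): a vector immediate qualifies only if it replicates across the 32-bit lanes and then fits a NEON modified-immediate encoding, either a replicated 16-bit pattern or a single byte shifted within 32 bits. Assuming the is_shimm1632 helper above:

    int cmode, imm8;
    is_shimm1632(0x00ff0000, &cmode, &imm8);  /* true: 0xff << 16 in each 32-bit lane */
    is_shimm1632(0x00420042, &cmode, &imm8);  /* true: 16-bit pattern 0x0042, replicated */
    is_shimm1632(0x12345678, &cmode, &imm8);  /* false: not encodable; TCG falls back to a register */

For and_vec the complement is tested instead (val = ~val above), which corresponds to the VBIC-immediate form.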
NEON has 3 instructions implementing this 4 argument operation,
with each insn overlapping a different logical input onto the
destination register.
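For reference, the three insns differ only in which logical input must already sit in the destination (semantics per the ARM ARM; in TCG terms the selector, true and false inputs are arguments 1, 2 and 3 of bitsel_vec):

    /* VBSL d, n, m:  d = (n & d) | (m & ~d)    selector already in d    */
    /* VBIT d, n, m:  d = (n & m) | (d & ~m)    false input already in d */
    /* VBIF d, n, m:  d = (d & m) | (n & ~m)    true input already in d  */

so the backend below picks whichever form matches the register overlap, and only spends a mov when the output overlaps none of the inputs.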

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target-con-set.h |  1 +
 tcg/arm/tcg-target.h         |  2 +-
 tcg/arm/tcg-target.c.inc     | 22 ++++++++++++++++++++--
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target-con-set.h
+++ b/tcg/arm/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, w)
 C_O1_I2(w, w, wO)
 C_O1_I2(w, w, wV)
 C_O1_I2(w, w, wZ)
+C_O1_I3(w, w, w, w)
 C_O1_I4(r, r, r, rI, rI)
 C_O1_I4(r, r, rIN, rIK, 0)
 C_O2_I1(r, r, l)
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #define TCG_TARGET_HAS_mul_vec    1
 #define TCG_TARGET_HAS_sat_vec    1
 #define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_bitsel_vec 1
 #define TCG_TARGET_HAS_cmpsel_vec 0
 
 #define TCG_TARGET_DEFAULT_MO (0)
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     INSN_VSARI     = 0xf2800010,  /* VSHR.S */
     INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
 
+    INSN_VBSL      = 0xf3100110,
+    INSN_VBIT      = 0xf3200110,
+    INSN_VBIF      = 0xf3300110,
+
     INSN_VTST      = 0xf2000810,
 
     INSN_VDUP_G    = 0xee800b10, /* VDUP (ARM core register) */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
         return C_O1_I2(w, w, wV);
     case INDEX_op_cmp_vec:
         return C_O1_I2(w, w, wZ);
-
+    case INDEX_op_bitsel_vec:
+        return C_O1_I3(w, w, w, w);
     default:
         g_assert_not_reached();
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 {
     TCGType type = vecl + TCG_TYPE_V64;
     unsigned q = vecl;
-    TCGArg a0, a1, a2;
+    TCGArg a0, a1, a2, a3;
     int cmode, imm8;
 
     a0 = args[0];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         }
         return;
 
+    case INDEX_op_bitsel_vec:
+        a3 = args[3];
+        if (a0 == a3) {
+            tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
+        } else if (a0 == a2) {
+            tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
+        } else {
+            tcg_out_mov(s, type, a0, a1);
+            tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
+        }
+        return;
+
     case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
     case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
     default:
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_sssub_vec:
     case INDEX_op_usadd_vec:
     case INDEX_op_ussub_vec:
+    case INDEX_op_bitsel_vec:
         return 1;
     case INDEX_op_abs_vec:
     case INDEX_op_cmp_vec:
--
2.25.1


GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/xtensa/translate.c | 25 ++++++++-----------------
 1 file changed, 8 insertions(+), 17 deletions(-)

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
     if (dc->icount) {
         tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
     }
-    if (dc->base.singlestep_enabled) {
-        gen_exception(dc, EXCP_DEBUG);
+    if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
+        slot = gen_postprocess(dc, slot);
+    }
+    if (slot >= 0) {
+        tcg_gen_goto_tb(slot);
+        tcg_gen_exit_tb(dc->base.tb, slot);
     } else {
-        if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
-            slot = gen_postprocess(dc, slot);
-        }
-        if (slot >= 0) {
-            tcg_gen_goto_tb(slot);
-            tcg_gen_exit_tb(dc->base.tb, slot);
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+        tcg_gen_exit_tb(NULL, 0);
     }
     dc->base.is_jmp = DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
     case DISAS_NORETURN:
         break;
     case DISAS_TOO_MANY:
-        if (dc->base.singlestep_enabled) {
-            tcg_gen_movi_i32(cpu_pc, dc->pc);
-            gen_exception(dc, EXCP_DEBUG);
-        } else {
-            gen_jumpi(dc, dc->pc, 0);
-        }
+        gen_jumpi(dc, dc->pc, 0);
         break;
     default:
         g_assert_not_reached();
--
2.25.1
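The xtensa change follows the same conversion as the tricore one above: with GDB single-stepping handled generically, the translator no longer raises EXCP_DEBUG itself. Roughly (a sketch of the common-code behaviour, not a quote from this pull request), the TB-execution loop now ends each stepped block with:

    if (unlikely(cpu->singlestep_enabled)) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

so every front end can emit its normal goto_tb / lookup_and_goto_ptr exits unconditionally.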
This reverts commit 1b36e4f5a5de585210ea95f2257839c2312be28f.

Despite a comment saying why cpu_common_props cannot be placed in
a file that is compiled once, it was moved anyway.  Revert that.

Since then, Property is not defined in hw/core/cpu.h, so it is now
easier to declare a function to install the properties rather than
the Property array itself.

Cc: Eduardo Habkost <ehabkost@redhat.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h |  1 +
 cpu.c                 | 21 +++++++++++++++++++++
 hw/core/cpu-common.c  | 17 +----------------
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
     GCC_FMT_ATTR(2, 3);
 
 /* $(top_srcdir)/cpu.c */
+void cpu_class_init_props(DeviceClass *dc);
 void cpu_exec_initfn(CPUState *cpu);
 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
 void cpu_exec_unrealizefn(CPUState *cpu);
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
     cpu_list_remove(cpu);
 }
 
+static Property cpu_common_props[] = {
+#ifndef CONFIG_USER_ONLY
+    /*
+     * Create a memory property for softmmu CPU object,
+     * so users can wire up its memory. (This can't go in hw/core/cpu.c
+     * because that file is compiled only once for both user-mode
+     * and system builds.) The default if no link is set up is to use
+     * the system address space.
+     */
+    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+                     MemoryRegion *),
+#endif
+    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+    device_class_set_props(dc, cpu_common_props);
+}
+
 void cpu_exec_initfn(CPUState *cpu)
 {
     cpu->as = NULL;
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
     return cpu->cpu_index;
 }
 
-static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
-    /* Create a memory property for softmmu CPU object,
-     * so users can wire up its memory. (This can't go in hw/core/cpu.c
-     * because that file is compiled only once for both user-mode
-     * and system builds.) The default if no link is set up is to use
-     * the system address space.
-     */
-    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
-                     MemoryRegion *),
-#endif
-    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
-    DEFINE_PROP_END_OF_LIST(),
-};
-
 static void cpu_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
     dc->realize = cpu_common_realizefn;
     dc->unrealize = cpu_common_unrealizefn;
     dc->reset = cpu_common_reset;
-    device_class_set_props(dc, cpu_common_props);
+    cpu_class_init_props(dc);
     /*
      * Reason: CPUs still need special care by board code: wiring up
      * IRQs, adding reset handlers, halting non-first CPUs, ...
--
2.25.1


Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 52 +++++++++++++++++++++++++++++++++-----
 1 file changed, 46 insertions(+), 6 deletions(-)

diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     /* Otherwise the assembler uses mov r0,r0 */
     INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
 
+    INSN_VORR      = 0xf2200110,
+
     INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
     INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
 } ARMInsn;
@@ -XXX,XX +XXX,XX @@ static uint32_t encode_vd(TCGReg rd)
     return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
 }
 
+static uint32_t encode_vn(TCGReg rn)
+{
+    tcg_debug_assert(rn >= TCG_REG_Q0);
+    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
+}
+
+static uint32_t encode_vm(TCGReg rm)
+{
+    tcg_debug_assert(rm >= TCG_REG_Q0);
+    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
+}
+
+static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
+                          TCGReg d, TCGReg n, TCGReg m)
+{
+    tcg_out32(s, insn | (vece << 20) | (q << 6) |
+              encode_vd(d) | encode_vn(n) | encode_vm(m));
+}
+
 static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                           TCGReg rd, TCGReg rn, int offset)
 {
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
     return false;
 }
 
-static inline bool tcg_out_mov(TCGContext *s, TCGType type,
-                               TCGReg ret, TCGReg arg)
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
 {
-    tcg_out_mov_reg(s, COND_AL, ret, arg);
-    return true;
+    if (ret == arg) {
+        return true;
+    }
+    switch (type) {
+    case TCG_TYPE_I32:
+        if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
+            tcg_out_mov_reg(s, COND_AL, ret, arg);
+            return true;
+        }
+        return false;
+
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+        /* "VMOV D,N" is an alias for "VORR D,N,N". */
+        tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
+        return true;
+
+    default:
+        g_assert_not_reached();
+    }
 }
 
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
-                                TCGReg ret, tcg_target_long arg)
+static void tcg_out_movi(TCGContext *s, TCGType type,
+                         TCGReg ret, tcg_target_long arg)
 {
+    tcg_debug_assert(type == TCG_TYPE_I32);
+    tcg_debug_assert(ret < TCG_REG_Q0);
     tcg_out_movi32(s, COND_AL, ret, arg);
 }
 
--
2.25.1
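One nonobvious detail in the encode_vd/encode_vn/encode_vm helpers above (my own gloss, not from the patch): TCG numbers the NEON registers Q0-Q15, while the instruction fields name D registers, and Qn aliases D(2n),D(2n+1). Shifting the low three bits of the Q-register number one bit further left than the architectural field position performs that doubling implicitly and leaves the field's low bit zero. For example, with the Vd field at bits 15:12 plus the D bit at 22:

    /* Q3 -> D6: Vd field is 6 << 12, produced directly as 3 << 13, D bit clear */
    encode_vd(TCG_REG_Q3) == (3 << 13);

No explicit multiply by two is needed, and an odd D register can never be named.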