Second pull for this week, since this set is large enough by itself.

r~


The following changes since commit 7c9236d6d61f30583d5d860097d88dbf0fe487bf:

  Merge tag 'pull-tcg-20230116' of https://gitlab.com/rth7680/qemu into staging (2023-01-17 10:24:16 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230117

for you to fetch changes up to 493c9b19a7fb7f387c4fcf57d3836504d5242bf5:

  tcg/riscv: Implement direct branch for goto_tb (2023-01-17 22:36:17 +0000)

----------------------------------------------------------------
tcg: Fix race conditions in (most) goto_tb implementations

----------------------------------------------------------------
Richard Henderson (22):
      tcg: Split out tcg_out_exit_tb
      tcg/i386: Remove unused goto_tb code for indirect jump
      tcg/ppc: Remove unused goto_tb code for indirect jump
      tcg/sparc64: Remove unused goto_tb code for indirect jump
      tcg: Replace asserts on tcg_jmp_insn_offset
      tcg: Introduce set_jmp_insn_offset
      tcg: Introduce get_jmp_target_addr
      tcg: Split out tcg_out_goto_tb
      tcg: Rename TB_JMP_RESET_OFFSET_INVALID to TB_JMP_OFFSET_INVALID
      tcg: Add gen_tb to TCGContext
      tcg: Add TranslationBlock.jmp_insn_offset
      tcg: Change tb_target_set_jmp_target arguments
      tcg: Move tb_target_set_jmp_target declaration to tcg.h
      tcg: Always define tb_target_set_jmp_target
      tcg: Remove TCG_TARGET_HAS_direct_jump
      tcg/aarch64: Reorg goto_tb implementation
      tcg/ppc: Reorg goto_tb implementation
      tcg/sparc64: Remove USE_REG_TB
      tcg/sparc64: Reorg goto_tb implementation
      tcg/arm: Implement direct branch for goto_tb
      tcg/riscv: Introduce OPC_NOP
      tcg/riscv: Implement direct branch for goto_tb

 include/exec/exec-all.h          |   5 +-
 include/tcg/tcg.h                |  14 ++-
 tcg/aarch64/tcg-target.h         |   6 +-
 tcg/arm/tcg-target.h             |   5 -
 tcg/i386/tcg-target.h            |   9 --
 tcg/loongarch64/tcg-target.h     |   3 -
 tcg/mips/tcg-target.h            |   5 -
 tcg/ppc/tcg-target.h             |   7 +-
 tcg/riscv/tcg-target.h           |   4 -
 tcg/s390x/tcg-target.h           |  11 ---
 tcg/sparc64/tcg-target.h         |   4 -
 tcg/tci/tcg-target.h             |   4 -
 accel/tcg/cpu-exec.c             |  21 ++--
 accel/tcg/translate-all.c        |  10 +-
 tcg/tcg-op.c                     |  14 +--
 tcg/tcg.c                        |  42 +++++---
 tcg/aarch64/tcg-target.c.inc     | 106 ++++++++++-----------
 tcg/arm/tcg-target.c.inc         |  89 +++++++++++------
 tcg/i386/tcg-target.c.inc        |  68 +++++++------
 tcg/loongarch64/tcg-target.c.inc |  66 +++++++------
 tcg/mips/tcg-target.c.inc        |  59 +++++++-----
 tcg/ppc/tcg-target.c.inc         | 193 ++++++++++++-------------------------
 tcg/riscv/tcg-target.c.inc       |  65 +++++++++----
 tcg/s390x/tcg-target.c.inc       |  67 ++++++++-----
 tcg/sparc64/tcg-target.c.inc     | 201 +++++++++++++++------------------------
 tcg/tci/tcg-target.c.inc         |  31 +++---
 26 files changed, 528 insertions(+), 581 deletions(-)
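
Before the per-patch diffs below, a minimal sketch of the property the
series is built around may help.  This is illustrative C, not QEMU code
(the names TBLink, tb_next and tb_retarget are invented for the
example): a chained TB's jump target must be repatched with one aligned
atomic store, so that a vCPU thread racing through the TB observes
either the old or the new destination, never a torn mixture.

    #include <stdatomic.h>
    #include <stdint.h>

    /* One translation-block stub with a patchable chain target. */
    typedef struct {
        _Atomic uintptr_t jmp_dest;   /* address the TB jumps to next */
    } TBLink;

    /* Executing thread: read the current target and continue there. */
    static inline uintptr_t tb_next(const TBLink *link)
    {
        return atomic_load_explicit(&link->jmp_dest, memory_order_acquire);
    }

    /*
     * Patching thread: a single aligned atomic store.  This is why the
     * backends below insert nops to align the patchable instruction
     * pair before recording its offset.
     */
    static inline void tb_retarget(TBLink *link, uintptr_t dest)
    {
        atomic_store_explicit(&link->jmp_dest, dest, memory_order_release);
    }
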
The INDEX_op_exit_tb opcode needs no register allocation.
Split out a dedicated helper function for it.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  4 ++++
 tcg/aarch64/tcg-target.c.inc     | 22 ++++++++++--------
 tcg/arm/tcg-target.c.inc         | 11 +++++----
 tcg/i386/tcg-target.c.inc        | 21 +++++++++--------
 tcg/loongarch64/tcg-target.c.inc | 22 ++++++++++--------
 tcg/mips/tcg-target.c.inc        | 33 +++++++++++++--------------
 tcg/ppc/tcg-target.c.inc         | 11 +++++----
 tcg/riscv/tcg-target.c.inc       | 22 ++++++++++--------
 tcg/s390x/tcg-target.c.inc       | 23 ++++++++++---------
 tcg/sparc64/tcg-target.c.inc     | 39 +++++++++++++++++---------------
 tcg/tci/tcg-target.c.inc         | 10 ++++----
 11 files changed, 121 insertions(+), 97 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg);
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS]);
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
         case INDEX_op_call:
             tcg_reg_alloc_call(s, op);
             break;
+        case INDEX_op_exit_tb:
+            tcg_out_exit_tb(s, op->args[0]);
+            break;
         case INDEX_op_dup2_vec:
             if (tcg_reg_alloc_dup2(s, op)) {
                 break;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 
 static const tcg_insn_unit *tb_ret_addr;
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    /* Reuse the zeroing that exists for goto_ptr.  */
+    if (a0 == 0) {
+        tcg_out_goto_long(s, tcg_code_gen_epilogue);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
+        tcg_out_goto_long(s, tb_ret_addr);
+    }
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 #define REG0(I)  (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        /* Reuse the zeroing that exists for goto_ptr.  */
-        if (a0 == 0) {
-            tcg_out_goto_long(s, tcg_code_gen_epilogue);
-        } else {
-            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
-            tcg_out_goto_long(s, tb_ret_addr);
-        }
-        break;
-
     case INDEX_op_goto_tb:
         tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
         /*
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 
 static void tcg_out_epilogue(TCGContext *s);
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
+{
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
+    tcg_out_epilogue(s);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     int c;
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
-        tcg_out_epilogue(s);
-        break;
     case INDEX_op_goto_tb:
         {
             /* Indirect jump method */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #endif
 }
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    /* Reuse the zeroing that exists for goto_ptr.  */
+    if (a0 == 0) {
+        tcg_out_jmp(s, tcg_code_gen_epilogue);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
+        tcg_out_jmp(s, tb_ret_addr);
+    }
+}
+
 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                               const TCGArg args[TCG_MAX_OP_ARGS],
                               const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     const_a2 = const_args[2];
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        /* Reuse the zeroing that exists for goto_ptr.  */
-        if (a0 == 0) {
-            tcg_out_jmp(s, tcg_code_gen_epilogue);
-        } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
-            tcg_out_jmp(s, tb_ret_addr);
-        }
-        break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_insn_offset) {
             /* direct jump method */
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
 
 static const tcg_insn_unit *tb_ret_addr;
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    /* Reuse the zeroing that exists for goto_ptr.  */
+    if (a0 == 0) {
+        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
+        tcg_out_call_int(s, tb_ret_addr, true);
+    }
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     int c2 = const_args[2];
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        /* Reuse the zeroing that exists for goto_ptr.  */
-        if (a0 == 0) {
-            tcg_out_call_int(s, tcg_code_gen_epilogue, true);
-        } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
-            tcg_out_call_int(s, tb_ret_addr, true);
-        }
-        break;
-
     case INDEX_op_goto_tb:
         tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
         /*
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
     }
 }
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    TCGReg b0 = TCG_REG_ZERO;
+
+    if (a0 & ~0xffff) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
+        b0 = TCG_REG_V0;
+    }
+    if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)tb_ret_addr);
+        tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
+    }
+    tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     c2 = const_args[2];
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        {
-            TCGReg b0 = TCG_REG_ZERO;
-
-            a0 = (intptr_t)a0;
-            if (a0 & ~0xffff) {
-                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
-                b0 = TCG_REG_V0;
-            }
-            if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
-                tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
-                             (uintptr_t)tb_ret_addr);
-                tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
-            }
-            tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
-        }
-        break;
     case INDEX_op_goto_tb:
         /* indirect jump method */
         tcg_debug_assert(s->tb_jmp_insn_offset == 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out32(s, BCLR | BO_ALWAYS);
 }
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
+{
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
+    tcg_out_b(s, 0, tcg_code_gen_epilogue);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     TCGArg a0, a1, a2;
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
-        tcg_out_b(s, 0, tcg_code_gen_epilogue);
-        break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_insn_offset) {
             /* Direct jump. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 
 static const tcg_insn_unit *tb_ret_addr;
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    /* Reuse the zeroing that exists for goto_ptr.  */
+    if (a0 == 0) {
+        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
+        tcg_out_call_int(s, tb_ret_addr, true);
+    }
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     int c2 = const_args[2];
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        /* Reuse the zeroing that exists for goto_ptr.  */
-        if (a0 == 0) {
-            tcg_out_call_int(s, tcg_code_gen_epilogue, true);
-        } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
-            tcg_out_call_int(s, tb_ret_addr, true);
-        }
-        break;
-
     case INDEX_op_goto_tb:
         assert(s->tb_jmp_insn_offset == 0);
         /* indirect jump method */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
 #endif
 }
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    /* Reuse the zeroing that exists for goto_ptr.  */
+    if (a0 == 0) {
+        tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
+        tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
+    }
+}
+
 # define OP_32_64(x) \
         case glue(glue(INDEX_op_,x),_i32): \
         case glue(glue(INDEX_op_,x),_i64)
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     TCGArg a0, a1, a2;
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        /* Reuse the zeroing that exists for goto_ptr.  */
-        a0 = args[0];
-        if (a0 == 0) {
-            tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
-        } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
-            tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
-        }
-        break;
-
     case INDEX_op_goto_tb:
         a0 = args[0];
         /*
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
 #endif /* CONFIG_SOFTMMU */
 }
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
+{
+    if (check_fit_ptr(a0, 13)) {
+        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
+        return;
+    } else if (USE_REG_TB) {
+        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
+        if (check_fit_ptr(tb_diff, 13)) {
+            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+            /* Note that TCG_REG_TB has been unwound to O1.  */
+            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
+            return;
+        }
+    }
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
+    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
+    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     c2 = const_args[2];
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        if (check_fit_ptr(a0, 13)) {
-            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
-            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
-            break;
-        } else if (USE_REG_TB) {
-            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
-            if (check_fit_ptr(tb_diff, 13)) {
-                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
-                /* Note that TCG_REG_TB has been unwound to O1.  */
-                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
-                break;
-            }
-        }
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
-        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
-        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
-        break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_insn_offset) {
             /* direct jump method */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
 # define CASE_64(x)
 #endif
 
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
+{
+    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     TCGOpcode exts;
 
     switch (opc) {
-    case INDEX_op_exit_tb:
-        tcg_out_op_p(s, opc, (void *)args[0]);
-        break;
-
     case INDEX_op_goto_tb:
         tcg_debug_assert(s->tb_jmp_insn_offset == 0);
         /* indirect jump method. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
     case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
+    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
     default:
         tcg_abort();
     }
--
2.34.1

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.c.inc | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_insn_offset) {
-            /* direct jump method */
-            int gap;
-            /* jump displacement must be aligned for atomic patching;
+        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
+        {
+            /*
+             * Jump displacement must be aligned for atomic patching;
              * see if we need to add extra nops before jump
              */
-            gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
+            int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
             if (gap != 1) {
                 tcg_out_nopn(s, gap - 1);
             }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
             s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
             tcg_out32(s, 0);
-        } else {
-            /* indirect jump method */
-            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
-                                 (intptr_t)(s->tb_jmp_target_addr + a0));
         }
         set_jmp_reset_offset(s, a0);
         break;
--
2.34.1

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 32 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 19 deletions(-)

diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_insn_offset) {
-            /* Direct jump. */
-            if (TCG_TARGET_REG_BITS == 64) {
-                /* Ensure the next insns are 8 or 16-byte aligned. */
-                while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
-                    tcg_out32(s, NOP);
-                }
-                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
-                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
-                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
-            } else {
-                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
-                tcg_out32(s, B);
-                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
-                break;
+        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
+        /* Direct jump. */
+        if (TCG_TARGET_REG_BITS == 64) {
+            /* Ensure the next insns are 8 or 16-byte aligned. */
+            while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
+                tcg_out32(s, NOP);
             }
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+            tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
         } else {
-            /* Indirect jump. */
-            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
-            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
-                       (intptr_t)(s->tb_jmp_insn_offset + args[0]));
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            tcg_out32(s, B);
+            s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+            break;
         }
         tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
         tcg_out32(s, BCCTR | BO_ALWAYS);
--
2.34.1

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.c.inc | 41 +++++++++++-------------------
 1 file changed, 12 insertions(+), 29 deletions(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
     return false;
 }
 
-static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
-{
-    intptr_t diff = tcg_tbrel_diff(s, arg);
-    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
-        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
-        return;
-    }
-    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
-    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
-}
-
 static void tcg_out_sety(TCGContext *s, TCGReg rs)
 {
     tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_insn_offset) {
-            /* direct jump method */
-            if (USE_REG_TB) {
-                /* make sure the patch is 8-byte aligned.  */
-                if ((intptr_t)s->code_ptr & 4) {
-                    tcg_out_nop(s);
-                }
-                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
-                tcg_out_sethi(s, TCG_REG_T1, 0);
-                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
-                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
-                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
-            } else {
-                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
-                tcg_out32(s, CALL);
+        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
+        /* Direct jump. */
+        if (USE_REG_TB) {
+            /* make sure the patch is 8-byte aligned.  */
+            if ((intptr_t)s->code_ptr & 4) {
                 tcg_out_nop(s);
             }
+            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            tcg_out_sethi(s, TCG_REG_T1, 0);
+            tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
+            tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
+            tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
         } else {
-            /* indirect jump method */
-            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
-            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
+            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            tcg_out32(s, CALL);
             tcg_out_nop(s);
         }
         set_jmp_reset_offset(s, a0);
--
2.34.1

Test TCG_TARGET_HAS_direct_jump instead of testing an
implementation pointer.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc     | 2 +-
 tcg/arm/tcg-target.c.inc         | 2 +-
 tcg/loongarch64/tcg-target.c.inc | 2 +-
 tcg/mips/tcg-target.c.inc        | 2 +-
 tcg/riscv/tcg-target.c.inc       | 2 +-
 tcg/tci/tcg-target.c.inc         | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
+        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
          * write can be used to patch the target address.
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         intptr_t ptr, dif, dil;
         TCGReg base = TCG_REG_PC;
 
-        tcg_debug_assert(s->tb_jmp_insn_offset == 0);
+        qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
         dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
         dil = sextract32(dif, 0, 12);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
+        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that patch area is 8-byte aligned so that an
          * atomic write can be used to patch the target address.
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_tb:
         /* indirect jump method */
-        tcg_debug_assert(s->tb_jmp_insn_offset == 0);
+        qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                    (uintptr_t)(s->tb_jmp_target_addr + a0));
         tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        assert(s->tb_jmp_insn_offset == 0);
+        qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         /* indirect jump method */
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                    (uintptr_t)(s->tb_jmp_target_addr + a0));
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        tcg_debug_assert(s->tb_jmp_insn_offset == 0);
+        qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         /* indirect jump method. */
         tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
         set_jmp_reset_offset(s, args[0]);
--
2.34.1

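
As an aside on the two assert flavors traded above: qemu_build_assert()
requires its condition to be an integer constant expression, while
tcg_debug_assert() is evaluated at run time.  A rough sketch of the
distinction (illustrative macros, not the QEMU definitions):

    #include <assert.h>

    /* Build-time: a negative array size fails compilation when the
       condition is false, so the condition must be a constant. */
    #define BUILD_ASSERT_SKETCH(cond)  ((void)sizeof(char[(cond) ? 1 : -1]))

    /* Run-time: merely evaluates its argument when executed. */
    #define RUNTIME_ASSERT_SKETCH(cond)  assert(cond)

That constraint is what drives the next patch, which moves the check
behind a helper function where only a run-time assert is possible.
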
Similar to the existing set_jmp_reset_offset.  Move any assert for
TCG_TARGET_HAS_direct_jump into the new function (which now cannot
be build-time).  Will be unused if TCG_TARGET_HAS_direct_jump is
constant 0, but we can't test for constant in the preprocessor,
so just mark it G_GNUC_UNUSED.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 10 ++++++++++
 tcg/aarch64/tcg-target.c.inc     |  3 +--
 tcg/i386/tcg-target.c.inc        |  3 +--
 tcg/loongarch64/tcg-target.c.inc |  3 +--
 tcg/ppc/tcg-target.c.inc         |  7 +++----
 tcg/s390x/tcg-target.c.inc       |  2 +-
 tcg/sparc64/tcg-target.c.inc     |  5 ++---
 7 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which)
     s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
 }
 
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
+{
+    /*
+     * We will check for overflow at the end of the opcode loop in
+     * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
+     */
+    tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
+    s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
+}
+
 /* Signal overflow, starting over with fewer guest insns. */
 static G_NORETURN
 void tcg_raise_tb_overflow(TCGContext *s)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
          * write can be used to patch the target address.
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         {
             /*
              * Jump displacement must be aligned for atomic patching;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 tcg_out_nopn(s, gap - 1);
             }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, 0);
         }
         set_jmp_reset_offset(s, a0);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /*
          * Ensure that patch area is 8-byte aligned so that an
          * atomic write can be used to patch the target address.
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out_nop(s);
         }
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         /*
          * actual branch destination will be patched by
          * tb_target_set_jmp_target later
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (TCG_TARGET_REG_BITS == 64) {
             /* Ensure the next insns are 8 or 16-byte aligned. */
             while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
                 tcg_out32(s, NOP);
             }
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
             tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
         } else {
-            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, args[0]);
             tcg_out32(s, B);
-            s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+            set_jmp_reset_offset(s, args[0]);
             break;
         }
         tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
             tcg_out16(s, NOP);
         }
         tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        set_jmp_insn_offset(s, a0);
         s->code_ptr += 2;
         set_jmp_reset_offset(s, a0);
         break;
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_goto_tb:
-        qemu_build_assert(TCG_TARGET_HAS_direct_jump);
         /* Direct jump. */
         if (USE_REG_TB) {
             /* make sure the patch is 8-byte aligned.  */
             if ((intptr_t)s->code_ptr & 4) {
                 tcg_out_nop(s);
             }
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out_sethi(s, TCG_REG_T1, 0);
             tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
             tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
             tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
         } else {
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+            set_jmp_insn_offset(s, a0);
             tcg_out32(s, CALL);
             tcg_out_nop(s);
         }
--
2.34.1

Similar to the existing set_jmp_reset_offset.  Include the
rw->rx address space conversion done by arm and s390x, and
forgotten by mips and riscv.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                  | 9 +++++++++
 tcg/arm/tcg-target.c.inc   | 2 +-
 tcg/mips/tcg-target.c.inc  | 2 +-
 tcg/riscv/tcg-target.c.inc | 2 +-
 tcg/tci/tcg-target.c.inc   | 2 +-
 5 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
     s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
 }
 
+static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
+{
+    /*
+     * Return the read-execute version of the pointer, for the benefit
+     * of any pc-relative addressing mode.
+     */
+    return (uintptr_t)tcg_splitwx_to_rx(&s->tb_jmp_target_addr[which]);
+}
+
 /* Signal overflow, starting over with fewer guest insns. */
 static G_NORETURN
 void tcg_raise_tb_overflow(TCGContext *s)
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         TCGReg base = TCG_REG_PC;
 
         qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
-        ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
+        ptr = get_jmp_target_addr(s, args[0]);
         dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
         dil = sextract32(dif, 0, 12);
         if (dif != dil) {
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         /* indirect jump method */
         qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
-                   (uintptr_t)(s->tb_jmp_target_addr + a0));
+                   get_jmp_target_addr(s, a0));
         tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
         tcg_out_nop(s);
         set_jmp_reset_offset(s, a0);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         /* indirect jump method */
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
-                   (uintptr_t)(s->tb_jmp_target_addr + a0));
+                   get_jmp_target_addr(s, a0));
         tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
         set_jmp_reset_offset(s, a0);
         break;
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_goto_tb:
         qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
         /* indirect jump method. */
-        tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
+        tcg_out_op_p(s, opc, (void *)get_jmp_target_addr(s, args[0]));
         set_jmp_reset_offset(s, args[0]);
         break;
--
2.34.1

diff view generated by jsdifflib
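A note for readers following the series: each "indirect jump method" above compiles goto_tb down to a load through a per-TB slot followed by a register branch, so re-chaining a TB is one pointer-sized data store rather than code patching. A rough C model of that idea, with illustrative names (jmp_slot, retarget) that are not QEMU API:

    #include <stdatomic.h>

    typedef void (*tc_entry)(void);

    /* Stands in for one tb_jmp_target_addr[] slot. */
    static _Atomic tc_entry jmp_slot;

    /* Generated code effectively does: load jmp_slot, branch to it. */
    static void retarget(tc_entry next_tc)
    {
        /* One atomic pointer store; no icache flush is needed,
           because only data, not code, changes. */
        atomic_store_explicit(&jmp_slot, next_tc, memory_order_release);
    }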
GDB single-stepping is now handled generically.

Tested-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/avr/translate.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
tcg_gen_exit_tb(tb, n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
- if (ctx->base.singlestep_enabled) {
- gen_helper_debug(cpu_env);
- } else {
- tcg_gen_lookup_and_goto_ptr();
- }
+ tcg_gen_lookup_and_goto_ptr();
}
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_movi_tl(cpu_pc, ctx->npc);
/* fall through */
case DISAS_LOOKUP:
- if (!ctx->base.singlestep_enabled) {
- tcg_gen_lookup_and_goto_ptr();
- break;
- }
- /* fall through */
+ tcg_gen_lookup_and_goto_ptr();
+ break;
case DISAS_EXIT:
- if (ctx->base.singlestep_enabled) {
- gen_helper_debug(cpu_env);
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
--
2.25.1

The INDEX_op_goto_tb opcode needs no register allocation.
Split out a dedicated helper function for it.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 4 ++
tcg/aarch64/tcg-target.c.inc | 40 ++++++++---------
tcg/arm/tcg-target.c.inc | 49 ++++++++++++-----------
tcg/i386/tcg-target.c.inc | 33 ++++++++--------
tcg/loongarch64/tcg-target.c.inc | 38 +++++++++---------
tcg/mips/tcg-target.c.inc | 21 +++++-----
tcg/ppc/tcg-target.c.inc | 52 ++++++++++++------------
tcg/riscv/tcg-target.c.inc | 20 +++++-----
tcg/s390x/tcg-target.c.inc | 31 ++++++++-------
tcg/sparc64/tcg-target.c.inc | 68 +++++++++++++++++---------------
tcg/tci/tcg-target.c.inc | 16 ++++----
11 files changed, 199 insertions(+), 173 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
+static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS]);
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
case INDEX_op_exit_tb:
tcg_out_exit_tb(s, op->args[0]);
break;
+ case INDEX_op_goto_tb:
+ tcg_out_goto_tb(s, op->args[0]);
+ break;
case INDEX_op_dup2_vec:
if (tcg_reg_alloc_dup2(s, op)) {
break;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
+ * write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out32(s, NOP);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * actual branch destination will be patched by
+ * tb_target_set_jmp_target later
+ */
+ tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
+ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])

switch (opc) {
- case INDEX_op_goto_tb:
- /*
- * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
- * write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out32(s, NOP);
- }
- set_jmp_insn_offset(s, a0);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_insn(s, 3207, BR, a0);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_epilogue(s);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Indirect jump method */
+ intptr_t ptr, dif, dil;
+ TCGReg base = TCG_REG_PC;
+
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ ptr = get_jmp_target_addr(s, which);
+ dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
+ dil = sextract32(dif, 0, 12);
+ if (dif != dil) {
+ /*
+ * The TB is close, but outside the 12 bits addressable by
+ * the load. We can extend this to 20 bits with a sub of a
+ * shifted immediate from pc. In the vastly unlikely event
+ * the code requires more than 1MB, we'll use 2 insns and
+ * be no worse off.
+ */
+ base = TCG_REG_R0;
+ tcg_out_movi32(s, COND_AL, base, ptr - dil);
+ }
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c;

switch (opc) {
- case INDEX_op_goto_tb:
- {
- /* Indirect jump method */
- intptr_t ptr, dif, dil;
- TCGReg base = TCG_REG_PC;
-
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- ptr = get_jmp_target_addr(s, args[0]);
- dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
- dil = sextract32(dif, 0, 12);
- if (dif != dil) {
- /* The TB is close, but outside the 12 bits addressable by
- the load. We can extend this to 20 bits with a sub of a
- shifted immediate from pc. In the vastly unlikely event
- the code requires more than 1MB, we'll use 2 insns and
- be no worse off. */
- base = TCG_REG_R0;
- tcg_out_movi32(s, COND_AL, base, ptr - dil);
- }
- tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
- set_jmp_reset_offset(s, args[0]);
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_b_reg(s, COND_AL, args[0]);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Jump displacement must be aligned for atomic patching;
+ * see if we need to add extra nops before jump
+ */
+ int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
+ if (gap != 1) {
+ tcg_out_nopn(s, gap - 1);
+ }
+ tcg_out8(s, OPC_JMP_long); /* jmp im */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const_a2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- {
- /*
- * Jump displacement must be aligned for atomic patching;
- * see if we need to add extra nops before jump
- */
- int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
- if (gap != 1) {
- tcg_out_nopn(s, gap - 1);
- }
- tcg_out8(s, OPC_JMP_long); /* jmp im */
- set_jmp_insn_offset(s, a0);
- tcg_out32(s, 0);
- }
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that patch area is 8-byte aligned so that an
+ * atomic write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * actual branch destination will be patched by
+ * tb_target_set_jmp_target later
+ */
+ tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- /*
- * Ensure that patch area is 8-byte aligned so that an
- * atomic write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out_nop(s);
- }
- set_jmp_insn_offset(s, a0);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
- tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* indirect jump method */
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
+ tcg_out_nop(s);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- /* indirect jump method */
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
- get_jmp_target_addr(s, a0));
- tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
- tcg_out_nop(s);
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_b(s, 0, tcg_code_gen_epilogue);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct jump. */
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* Ensure the next insns are 8 or 16-byte aligned. */
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
+ tcg_out32(s, NOP);
+ }
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+ tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+ tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+ set_jmp_reset_offset(s, which);
+ if (USE_REG_TB) {
+ /* For the unlinked case, need to reset TCG_REG_TB. */
+ tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
+ -tcg_current_code_size(s));
+ }
+ } else {
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, B);
+ set_jmp_reset_offset(s, which);
+ }
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;

switch (opc) {
- case INDEX_op_goto_tb:
- /* Direct jump. */
- if (TCG_TARGET_REG_BITS == 64) {
- /* Ensure the next insns are 8 or 16-byte aligned. */
- while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
- tcg_out32(s, NOP);
- }
- set_jmp_insn_offset(s, args[0]);
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- } else {
- set_jmp_insn_offset(s, args[0]);
- tcg_out32(s, B);
- set_jmp_reset_offset(s, args[0]);
- break;
- }
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS);
- set_jmp_reset_offset(s, args[0]);
- if (USE_REG_TB) {
- /* For the unlinked case, need to reset TCG_REG_TB. */
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
- -tcg_current_code_size(s));
- }
- break;
case INDEX_op_goto_ptr:
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
if (USE_REG_TB) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ /* indirect jump method */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- /* indirect jump method */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
- get_jmp_target_addr(s, a0));
- tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Branch displacement must be aligned for atomic patching;
+ * see if we need to add extra nop before branch
+ */
+ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+ tcg_out16(s, NOP);
+ }
+ tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
+ set_jmp_insn_offset(s, which);
+ s->code_ptr += 2;
+ set_jmp_reset_offset(s, which);
+}
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;

switch (opc) {
- case INDEX_op_goto_tb:
- a0 = args[0];
- /*
- * branch displacement must be aligned for atomic patching;
- * see if we need to add extra nop before branch
- */
- if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
- tcg_out16(s, NOP);
- }
- tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
- set_jmp_insn_offset(s, a0);
- s->code_ptr += 2;
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
a0 = args[0];
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct jump. */
+ if (USE_REG_TB) {
+ /* make sure the patch is 8-byte aligned. */
+ if ((intptr_t)s->code_ptr & 4) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ tcg_out_sethi(s, TCG_REG_T1, 0);
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
+ tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ } else {
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, CALL);
+ tcg_out_nop(s);
+ }
+ set_jmp_reset_offset(s, which);
+
+ /*
+ * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
+ * to the beginning of this TB.
+ */
+ if (USE_REG_TB) {
+ int c = -tcg_current_code_size(s);
+ if (check_fit_i32(c, 13)) {
+ tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ }
+ }
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- /* Direct jump. */
- if (USE_REG_TB) {
- /* make sure the patch is 8-byte aligned. */
- if ((intptr_t)s->code_ptr & 4) {
- tcg_out_nop(s);
- }
- set_jmp_insn_offset(s, a0);
- tcg_out_sethi(s, TCG_REG_T1, 0);
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
- } else {
- set_jmp_insn_offset(s, a0);
- tcg_out32(s, CALL);
- tcg_out_nop(s);
- }
- set_jmp_reset_offset(s, a0);
-
- /* For the unlinked path of goto_tb, we need to reset
- TCG_REG_TB to the beginning of this TB. */
- if (USE_REG_TB) {
- c = -tcg_current_code_size(s);
- if (check_fit_i32(c, 13)) {
- tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
- TCG_REG_T1, ARITH_ADD);
- }
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
if (USE_REG_TB) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ /* indirect jump method. */
+ tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGOpcode exts;

switch (opc) {
- case INDEX_op_goto_tb:
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- /* indirect jump method. */
- tcg_out_op_p(s, opc, (void *)get_jmp_target_addr(s, args[0]));
- set_jmp_reset_offset(s, args[0]);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_op_r(s, opc, args[0]);
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
--
2.34.1

diff view generated by jsdifflib
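It is worth spelling out why several of the backends above NOP-pad before the patchable branch: the displacement (or address pair) must sit entirely within one naturally aligned word, so a concurrently executing vCPU thread never observes a half-updated branch. A minimal sketch of that assumption (patch_site is an illustrative name, not QEMU API):

    #include <stdint.h>
    #include <stdatomic.h>

    /* Rewrite a 4-byte branch displacement that was emitted 4-byte aligned. */
    static void patch_site(_Atomic int32_t *aligned_disp, int32_t new_disp)
    {
        /* An aligned 32-bit store is single-copy atomic on the hosts that
           TCG targets, so readers see the old or the new displacement,
           never a mix of the two. */
        atomic_store_explicit(aligned_disp, new_disp, memory_order_relaxed);
    }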
GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/cris/translate.c | 16 ----------------
1 file changed, 16 deletions(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
}
}

- if (unlikely(dc->base.singlestep_enabled)) {
- switch (is_jmp) {
- case DISAS_TOO_MANY:
- case DISAS_UPDATE_NEXT:
- tcg_gen_movi_tl(env_pc, npc);
- /* fall through */
- case DISAS_JUMP:
- case DISAS_UPDATE:
- t_gen_raise_exception(EXCP_DEBUG);
- return;
- default:
- break;
- }
- g_assert_not_reached();
- }
-
switch (is_jmp) {
case DISAS_TOO_MANY:
gen_goto_tb(dc, 0, npc);
--
2.25.1

This will shortly be used for more than reset.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 2 +-
accel/tcg/translate-all.c | 8 ++++----
tcg/tcg.c | 4 ++--
3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
* setting one of the jump targets (or patching the jump instruction). Only
* two of such jumps are supported.
*/
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
-#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
uintptr_t jmp_target_arg[2]; /* target address or offset */

/*
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->jmp_dest[1] = (uintptr_t)NULL;

/* init original jump addresses which have been set during tcg_gen_code() */
- if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tb_reset_jump(tb, 0);
}
- if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tb_reset_jump(tb, 1);
}

@@ -XXX,XX +XXX,XX @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
if (tb_page_addr1(tb) != -1) {
tst->cross_page++;
}
- if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp_count++;
- if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+ if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
tst->direct_jmp2_count++;
}
}

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
#endif

/* Initialize goto_tb jump offsets. */
- tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
- tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
+ tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
if (TCG_TARGET_HAS_direct_jump) {
tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
--
2.34.1

diff view generated by jsdifflib
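For context on the sentinel above: set_jmp_reset_offset() keeps offsets below UINT16_MAX during codegen, so 0xffff can safely double as "no goto_tb emitted here". A tiny self-contained illustration of the test pattern used in translate-all.c:

    #include <stdint.h>
    #include <stdbool.h>

    #define TB_JMP_OFFSET_INVALID 0xffff

    static bool tb_has_direct_jump(const uint16_t jmp_reset_offset[2], int n)
    {
        return jmp_reset_offset[n] != TB_JMP_OFFSET_INVALID;
    }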
We were using singlestep_enabled as a proxy for whether
translator_use_goto_tb would always return false.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/microblaze/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
break;

case DISAS_JUMP:
- if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
+ if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
/* Direct jump. */
tcg_gen_discard_i32(cpu_btarget);

@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
return;
}

- /* Indirect jump (or direct jump w/ singlestep) */
+ /* Indirect jump (or direct jump w/ goto_tb disabled) */
tcg_gen_mov_i32(cpu_pc, cpu_btarget);
tcg_gen_discard_i32(cpu_btarget);
--
2.25.1

This can replace four other variables that are references
into the TranslationBlock structure.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg.h | 11 +++--------
accel/tcg/translate-all.c | 2 +-
tcg/tcg-op.c | 14 +++++++-------
tcg/tcg.c | 14 +++-----------
4 files changed, 14 insertions(+), 27 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
int nb_indirects;
int nb_ops;

- /* goto_tb support */
- tcg_insn_unit *code_buf;
- uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
- uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
- uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
-
TCGRegSet reserved_regs;
- uint32_t tb_cflags; /* cflags of the current TB */
intptr_t current_frame_offset;
intptr_t frame_start;
intptr_t frame_end;
TCGTemp *frame_temp;

- tcg_insn_unit *code_ptr;
+ TranslationBlock *gen_tb; /* tb for which code is being generated */
+ tcg_insn_unit *code_buf; /* pointer for start of tb */
+ tcg_insn_unit *code_ptr; /* pointer for running end of tb */

#ifdef CONFIG_PROFILER
TCGProfile prof;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
- tcg_ctx->tb_cflags = cflags;
+ tcg_ctx->gen_tb = tb;
tb_overflow:

#ifdef CONFIG_PROFILER
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,

void tcg_gen_mb(TCGBar mb_type)
{
- if (tcg_ctx->tb_cflags & CF_PARALLEL) {
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
void tcg_gen_goto_tb(unsigned idx)
{
/* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */
- tcg_debug_assert(!(tcg_ctx->tb_cflags & CF_NO_GOTO_TB));
+ tcg_debug_assert(!(tcg_ctx->gen_tb->cflags & CF_NO_GOTO_TB));
/* We only support two chained exits. */
tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
#ifdef CONFIG_DEBUG_TCG
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void)
{
TCGv_ptr ptr;

- if (tcg_ctx->tb_cflags & CF_NO_GOTO_PTR) {
+ if (tcg_ctx->gen_tb->cflags & CF_NO_GOTO_PTR) {
tcg_gen_exit_tb(NULL, 0);
return;
}
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
{
memop = tcg_canonicalize_memop(memop, 0, 0);

- if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
+ if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();

@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
{
memop = tcg_canonicalize_memop(memop, 1, 0);

- if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
+ if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();

@@ -XXX,XX +XXX,XX @@ static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \
{ \
- if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_##NAME##_i32 \
void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \
{ \
- if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which)
* We will check for overflow at the end of the opcode loop in
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
*/
- s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
+ s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}

static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
*/
tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
- s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
+ s->gen_tb->jmp_target_arg[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
@@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
* Return the read-execute version of the pointer, for the benefit
* of any pc-relative addressing mode.
*/
- return (uintptr_t)tcg_splitwx_to_rx(&s->tb_jmp_target_addr[which]);
+ return (uintptr_t)tcg_splitwx_to_rx(s->gen_tb->jmp_target_arg + which);
}

/* Signal overflow, starting over with fewer guest insns. */
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
/* Initialize goto_tb jump offsets. */
tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
- tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
- if (TCG_TARGET_HAS_direct_jump) {
- tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
- tcg_ctx->tb_jmp_target_addr = NULL;
- } else {
- tcg_ctx->tb_jmp_insn_offset = NULL;
- tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
- }

tcg_reg_alloc_start(s);
--
2.34.1

diff view generated by jsdifflib
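The shape of the gen_tb change above, reduced to a sketch: rather than copying fields out of the TranslationBlock at setup time, the context keeps one pointer and dereferences it at the point of use. Struct and function names here are abbreviated for illustration, not QEMU's:

    #include <stdint.h>

    struct TB  { uint32_t cflags; uint16_t jmp_reset_offset[2]; };
    struct Ctx { struct TB *gen_tb; };

    static uint32_t ctx_cflags(const struct Ctx *s)
    {
        return s->gen_tb->cflags;   /* was a cached s->tb_cflags copy */
    }

One pointer assignment in tb_gen_code() then replaces four per-TB setup stores, and any new TB field becomes reachable without further plumbing.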
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/mips/tcg/translate.c | 50 +++++++++++++------------------------
1 file changed, 18 insertions(+), 32 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
tcg_gen_exit_tb(ctx->base.tb, n);
} else {
gen_save_pc(dest);
- if (ctx->base.singlestep_enabled) {
- save_cpu_state(ctx, 0);
- gen_helper_raise_exception_debug(cpu_env);
- } else {
- tcg_gen_lookup_and_goto_ptr();
- }
+ tcg_gen_lookup_and_goto_ptr();
}
}

@@ -XXX,XX +XXX,XX @@ static void gen_branch(DisasContext *ctx, int insn_bytes)
} else {
tcg_gen_mov_tl(cpu_PC, btarget);
}
- if (ctx->base.singlestep_enabled) {
- save_cpu_state(ctx, 0);
- gen_helper_raise_exception_debug(cpu_env);
- }
tcg_gen_lookup_and_goto_ptr();
break;
default:
@@ -XXX,XX +XXX,XX @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);

- if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
- save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
- gen_helper_raise_exception_debug(cpu_env);
- } else {
- switch (ctx->base.is_jmp) {
- case DISAS_STOP:
- gen_save_pc(ctx->base.pc_next);
- tcg_gen_lookup_and_goto_ptr();
- break;
- case DISAS_NEXT:
- case DISAS_TOO_MANY:
- save_cpu_state(ctx, 0);
- gen_goto_tb(ctx, 0, ctx->base.pc_next);
- break;
- case DISAS_EXIT:
- tcg_gen_exit_tb(NULL, 0);
- break;
- case DISAS_NORETURN:
- break;
- default:
- g_assert_not_reached();
- }
+ switch (ctx->base.is_jmp) {
+ case DISAS_STOP:
+ gen_save_pc(ctx->base.pc_next);
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ save_cpu_state(ctx, 0);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ break;
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
}
}
--
2.25.1

Stop overloading jmp_target_arg for both offset and address,
depending on TCG_TARGET_HAS_direct_jump. Instead, add a new
field to hold the jump insn offset and always set the target
address in jmp_target_addr[]. This will allow a tcg backend
to use either direct or indirect depending on displacement.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 3 ++-
accel/tcg/cpu-exec.c | 5 ++---
tcg/tcg.c | 6 ++++--
3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
*/
#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
- uintptr_t jmp_target_arg[2]; /* target address or offset */
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
+ uintptr_t jmp_target_addr[2]; /* target address */

/*
* Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
+ tb->jmp_target_addr[n] = addr;
if (TCG_TARGET_HAS_direct_jump) {
- uintptr_t offset = tb->jmp_target_arg[n];
+ uintptr_t offset = tb->jmp_insn_offset[n];
uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
uintptr_t jmp_rx = tc_ptr + offset;
uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
- } else {
- tb->jmp_target_arg[n] = addr;
}
}

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
*/
tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
- s->gen_tb->jmp_target_arg[which] = tcg_current_code_size(s);
+ s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}

static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
@@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
* Return the read-execute version of the pointer, for the benefit
* of any pc-relative addressing mode.
*/
- return (uintptr_t)tcg_splitwx_to_rx(s->gen_tb->jmp_target_arg + which);
+ return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}

/* Signal overflow, starting over with fewer guest insns. */
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
/* Initialize goto_tb jump offsets. */
tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;

tcg_reg_alloc_start(s);
--
2.34.1

diff view generated by jsdifflib
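To restate the mechanism of the patch above: a TB now carries both the offset of the patchable branch insn and the branch's current target, so tb_set_jmp_target() can always record the address and, on direct-jump hosts, additionally re-patch the insn. A sketch with the same field layout (helper name illustrative):

    #include <stdint.h>

    struct TBJumps {
        uint16_t  jmp_insn_offset[2];  /* where the patchable branch lives */
        uintptr_t jmp_target_addr[2];  /* where the branch currently goes */
    };

    static void record_target(struct TBJumps *tb, int n, uintptr_t addr)
    {
        tb->jmp_target_addr[n] = addr;  /* the indirect path reads this slot */
        /* direct-jump hosts then also patch the insn found at
           jmp_insn_offset[n], as tb_set_jmp_target() does above */
    }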
Currently the change in cpu_tb_exec is masked by the debug exception
being raised by the translators. But this allows us to remove that code.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cpu-exec.c | 11 +++++++++++
1 file changed, 11 insertions(+)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
cc->set_pc(cpu, last_tb->pc);
}
+
+ /*
+ * If gdb single-step, and we haven't raised another exception,
+ * raise a debug exception. Single-step with another exception
+ * is handled in cpu_handle_exception.
+ */
+ if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
+ }
+
return last_tb;
}
--
2.25.1

Replace 'tc_ptr' and 'addr' with 'tb' and 'n'.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.h | 3 ++-
tcg/arm/tcg-target.h | 3 ++-
tcg/i386/tcg-target.h | 9 ++-------
tcg/loongarch64/tcg-target.h | 3 ++-
tcg/mips/tcg-target.h | 3 ++-
tcg/ppc/tcg-target.h | 3 ++-
tcg/riscv/tcg-target.h | 3 ++-
tcg/s390x/tcg-target.h | 10 ++--------
tcg/sparc64/tcg-target.h | 3 ++-
tcg/tci/tcg-target.h | 3 ++-
accel/tcg/cpu-exec.c | 11 ++++++++---
tcg/aarch64/tcg-target.c.inc | 5 +++--
tcg/i386/tcg-target.c.inc | 9 +++++++++
tcg/loongarch64/tcg-target.c.inc | 5 +++--
tcg/ppc/tcg-target.c.inc | 7 ++++---
tcg/s390x/tcg-target.c.inc | 10 ++++++++++
tcg/sparc64/tcg-target.c.inc | 7 ++++---
17 files changed, 61 insertions(+), 36 deletions(-)

diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 0

-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *, int,
+ uintptr_t, uintptr_t);

#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_MEMORY_BSWAP 0

/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t);

#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
#define TCG_TARGET_extract_i64_valid(ofs, len) \
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)

-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- /* patch the branch destination */
- qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
- /* no need to flush icache explicitly */
-}
+void tb_target_set_jmp_target(const TranslationBlock *, int,
+ uintptr_t, uintptr_t);

/* This defines the natural memory order supported by this
* architecture before guarantees made by various barrier
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1

-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t);

#define TCG_TARGET_DEFAULT_MO (0)

diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_MEMORY_BSWAP 1

/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t)
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t)
QEMU_ERROR("code path is reachable");

#define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
#define TCG_TARGET_HAS_bitsel_vec have_vsx
#define TCG_TARGET_HAS_cmpsel_vec 0

-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t);

#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
#endif

/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t);

#define TCG_TARGET_DEFAULT_MO (0)

diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];

#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)

-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
-{
- /* patch the branch destination */
- intptr_t disp = addr - (jmp_rx - 2);
- qatomic_set((int32_t *)jmp_rw, disp / 2);
- /* no need to flush icache explicitly */
-}
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw);

#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1

-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t);

#define TCG_TARGET_NEED_POOL_LABELS

diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
#define TCG_TARGET_HAS_MEMORY_BSWAP 1

/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t, uintptr_t);

#endif /* TCG_TARGET_H */
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
tb->jmp_target_addr[n] = addr;
if (TCG_TARGET_HAS_direct_jump) {
+ /*
+ * Get the rx view of the structure, from which we find the
+ * executable code address, and tb_target_set_jmp_target can
+ * produce a pc-relative displacement to jmp_target_addr[n].
+ */
+ const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
uintptr_t offset = tb->jmp_insn_offset[n];
- uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
- uintptr_t jmp_rx = tc_ptr + offset;
+ uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
- tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
+ tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}
}

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
tcg_out_call_int(s, target);
}

-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
{
+ uintptr_t addr = tb->jmp_target_addr[n];
tcg_insn_unit i1, i2;
TCGType rt = TCG_TYPE_I64;
TCGReg rd = TCG_REG_TMP;
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}

+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ /* patch the branch destination */
+ uintptr_t addr = tb->jmp_target_addr[n];
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
+ /* no need to flush icache explicitly */
+}
+
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop(TCGContext *s)
tcg_out32(s, NOP);
}

-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
{
tcg_insn_unit i1, i2;
ptrdiff_t upper, lower;
+ uintptr_t addr = tb->jmp_target_addr[n];
ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2;

if (offset == sextreg(offset, 0, 26)) {
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
flush_idcache_range(rx, rw, 16);
}

-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
{
tcg_insn_unit i0, i1, i2, i3;
- intptr_t tb_diff = addr - tc_ptr;
+ uintptr_t addr = tb->jmp_target_addr[n];
+ intptr_t tb_diff = addr - (uintptr_t)tb->tc.ptr;
intptr_t br_diff = addr - (jmp_rx + 4);
intptr_t lo, hi;

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
set_jmp_reset_offset(s, which);
}

+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+ /* patch the branch destination */
+ uintptr_t addr = tb->jmp_target_addr[n];
+ intptr_t disp = addr - (jmp_rx - 2);
+ qatomic_set((int32_t *)jmp_rw, disp / 2);
+ /* no need to flush icache explicitly */
+}
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ void tcg_register_jit(const void *buf, size_t buf_size)
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
- uintptr_t jmp_rw, uintptr_t addr)
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
{
- intptr_t tb_disp = addr - tc_ptr;
+ uintptr_t addr = tb->jmp_target_addr[n];
+ intptr_t tb_disp = addr - (uintptr_t)tb->tc.ptr;
intptr_t br_disp = addr - jmp_rx;
tcg_insn_unit i1, i2;

--
2.34.1

diff view generated by jsdifflib
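One detail of the cpu-exec.c hunk above deserves a gloss: with split read/write code buffers the same generated byte has two mappings, and the patcher must write through the rw alias while branches execute through the rx alias. In miniature:

    #include <stdint.h>

    /* Mirrors jmp_rw = jmp_rx - tcg_splitwx_diff in the hunk above. */
    static uintptr_t rw_alias(uintptr_t jmp_rx, intptr_t splitwx_diff)
    {
        return jmp_rx - splitwx_diff;   /* diff is 0 without split-wx */
    }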
Deleted patch
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 13 +++----------
1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
/* FALLTHRU */
case DISAS_PC_UPDATED:
- if (!ctx->base.singlestep_enabled) {
- tcg_gen_lookup_and_goto_ptr();
- break;
- }
- /* FALLTHRU */
+ tcg_gen_lookup_and_goto_ptr();
+ break;
case DISAS_PC_UPDATED_NOCHAIN:
- if (ctx->base.singlestep_enabled) {
- gen_excp_1(EXCP_DEBUG, 0);
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
--
2.25.1

diff view generated by jsdifflib
diff view generated by jsdifflib
Old series:

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/translate.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
 
 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
 {
-    if (dc->base.singlestep_enabled) {
-        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
-        tcg_gen_movi_i32(cpu_pc, dest);
-        gen_helper_raise_exception(cpu_env, tmp);
-        tcg_temp_free_i32(tmp);
-    } else if (translator_use_goto_tb(&dc->base, dest)) {
+    if (translator_use_goto_tb(&dc->base, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_i32(cpu_pc, dest);
         tcg_gen_exit_tb(dc->base.tb, n);
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
         /* Indirect jump (or direct jump w/ goto_tb disabled) */
         tcg_gen_mov_i32(cpu_pc, cpu_btarget);
         tcg_gen_discard_i32(cpu_btarget);
-
-        if (unlikely(cs->singlestep_enabled)) {
-            gen_raise_exception(dc, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
         return;
 
     default:
--
2.25.1

New series:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h            | 3 +++
 tcg/aarch64/tcg-target.h     | 4 ----
 tcg/arm/tcg-target.h         | 5 -----
 tcg/i386/tcg-target.h        | 3 ---
 tcg/loongarch64/tcg-target.h | 3 ---
 tcg/mips/tcg-target.h        | 5 -----
 tcg/ppc/tcg-target.h         | 4 ----
 tcg/riscv/tcg-target.h       | 4 ----
 tcg/s390x/tcg-target.h       | 4 ----
 tcg/sparc64/tcg-target.h     | 4 ----
 tcg/tci/tcg-target.h         | 4 ----
 11 files changed, 3 insertions(+), 40 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s);
 
 int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);
 
+void tb_target_set_jmp_target(const TranslationBlock *, int,
+                              uintptr_t, uintptr_t);
+
 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
 
 TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 
 #define TCG_TARGET_DEFAULT_MO (0)
 #define TCG_TARGET_HAS_MEMORY_BSWAP     0
-
-void tb_target_set_jmp_target(const TranslationBlock *, int,
-                              uintptr_t, uintptr_t);
-
 #define TCG_TARGET_NEED_LDST_LABELS
 #define TCG_TARGET_NEED_POOL_LABELS
 
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 
 #define TCG_TARGET_DEFAULT_MO (0)
 #define TCG_TARGET_HAS_MEMORY_BSWAP     0
-
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t);
-
 #define TCG_TARGET_NEED_LDST_LABELS
 #define TCG_TARGET_NEED_POOL_LABELS
 
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
 #define TCG_TARGET_extract_i64_valid(ofs, len) \
     (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
 
-void tb_target_set_jmp_target(const TranslationBlock *, int,
-                              uintptr_t, uintptr_t);
-
 /* This defines the natural memory order supported by this
  * architecture before guarantees made by various barrier
  * instructions.
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64        1
 #define TCG_TARGET_HAS_mulsh_i64        1
 
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t);
-
 #define TCG_TARGET_DEFAULT_MO (0)
 
 #define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_DEFAULT_MO (0)
 #define TCG_TARGET_HAS_MEMORY_BSWAP     1
 
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t)
-    QEMU_ERROR("code path is reachable");
-
 #define TCG_TARGET_NEED_LDST_LABELS
 
 #endif
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
 #define TCG_TARGET_HAS_bitsel_vec       have_vsx
 #define TCG_TARGET_HAS_cmpsel_vec       0
 
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t);
-
 #define TCG_TARGET_DEFAULT_MO (0)
 #define TCG_TARGET_HAS_MEMORY_BSWAP     1
-
 #define TCG_TARGET_NEED_LDST_LABELS
 #define TCG_TARGET_NEED_POOL_LABELS
 
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_mulsh_i64        1
 #endif
 
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t);
-
 #define TCG_TARGET_DEFAULT_MO (0)
 
 #define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_MEMORY_BSWAP   1
 
 #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
-
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t jmp_rx, uintptr_t jmp_rw);
-
 #define TCG_TARGET_NEED_LDST_LABELS
 #define TCG_TARGET_NEED_POOL_LABELS
 
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 
 #define TCG_TARGET_DEFAULT_MO (0)
 #define TCG_TARGET_HAS_MEMORY_BSWAP     1
-
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t);
-
 #define TCG_TARGET_NEED_POOL_LABELS
 
 #endif
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 
 #define TCG_TARGET_HAS_MEMORY_BSWAP     1
 
-/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t, uintptr_t);
-
 #endif /* TCG_TARGET_H */
--
2.34.1
Old series:

This reverts commit 1b36e4f5a5de585210ea95f2257839c2312be28f.

Despite a comment saying why cpu_common_props cannot be placed in
a file that is compiled once, it was moved anyway.  Revert that.

Since then, Property is not defined in hw/core/cpu.h, so it is now
easier to declare a function to install the properties rather than
the Property array itself.

Cc: Eduardo Habkost <ehabkost@redhat.com>
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h |  1 +
 cpu.c                 | 21 +++++++++++++++++++++
 hw/core/cpu-common.c  | 17 +----------------
 3 files changed, 23 insertions(+), 16 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
     GCC_FMT_ATTR(2, 3);
 
 /* $(top_srcdir)/cpu.c */
+void cpu_class_init_props(DeviceClass *dc);
 void cpu_exec_initfn(CPUState *cpu);
 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
 void cpu_exec_unrealizefn(CPUState *cpu);
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
     cpu_list_remove(cpu);
 }
 
+static Property cpu_common_props[] = {
+#ifndef CONFIG_USER_ONLY
+    /*
+     * Create a memory property for softmmu CPU object,
+     * so users can wire up its memory.  (This can't go in hw/core/cpu.c
+     * because that file is compiled only once for both user-mode
+     * and system builds.)  The default if no link is set up is to use
+     * the system address space.
+     */
+    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
+                     MemoryRegion *),
+#endif
+    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+void cpu_class_init_props(DeviceClass *dc)
+{
+    device_class_set_props(dc, cpu_common_props);
+}
+
 void cpu_exec_initfn(CPUState *cpu)
 {
     cpu->as = NULL;
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
     return cpu->cpu_index;
 }
 
-static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
-    /* Create a memory property for softmmu CPU object,
-     * so users can wire up its memory. (This can't go in hw/core/cpu.c
-     * because that file is compiled only once for both user-mode
-     * and system builds.) The default if no link is set up is to use
-     * the system address space.
-     */
-    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
-                     MemoryRegion *),
-#endif
-    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
-    DEFINE_PROP_END_OF_LIST(),
-};
-
 static void cpu_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
     dc->realize = cpu_common_realizefn;
     dc->unrealize = cpu_common_unrealizefn;
     dc->reset = cpu_common_reset;
-    device_class_set_props(dc, cpu_common_props);
+    cpu_class_init_props(dc);
     /*
      * Reason: CPUs still need special care by board code: wiring up
      * IRQs, adding reset handlers, halting non-first CPUs, ...
--
2.25.1

New series:

Install empty versions for !TCG_TARGET_HAS_direct_jump hosts.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc   | 6 ++++++
 tcg/mips/tcg-target.c.inc  | 6 ++++++
 tcg/riscv/tcg-target.c.inc | 6 ++++++
 tcg/tci/tcg-target.c.inc   | 6 ++++++
 4 files changed, 24 insertions(+)

diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    /* Always indirect, nothing to do */
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    /* Always indirect, nothing to do */
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    /* Always indirect, nothing to do */
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     set_jmp_reset_offset(s, which);
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    /* Always indirect, nothing to do */
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
--
2.34.1
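For these always-indirect backends the generated code reloads the destination from the TB's jmp_target_addr[] slot on every goto_tb, so re-linking a TB is a data store rather than code patching, and the empty stubs above are correct. The sketch below restates that idea as standalone C; DemoTB and demo_set_jmp_target are hypothetical stand-ins, not the actual QEMU structures.

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct {
        /* one slot per goto_tb exit, read by the generated code */
        _Atomic uintptr_t jmp_target_addr[2];
    } DemoTB;

    static void demo_set_jmp_target(DemoTB *tb, int n, uintptr_t addr)
    {
        /* a plain atomic store re-links the TB; no icache maintenance
         * is needed because no instruction bytes change */
        atomic_store_explicit(&tb->jmp_target_addr[n], addr,
                              memory_order_relaxed);
    }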
Old series:

GDB single-stepping is now handled generically.

Acked-by: Laurent Vivier <laurent@vivier.eu>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/m68k/translate.c | 44 +++++++++--------------------------------
 1 file changed, 9 insertions(+), 35 deletions(-)

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static void do_writebacks(DisasContext *s)
     }
 }
 
-static bool is_singlestepping(DisasContext *s)
-{
-    /*
-     * Return true if we are singlestepping either because of
-     * architectural singlestep or QEMU gdbstub singlestep. This does
-     * not include the command line '-singlestep' mode which is rather
-     * misnamed as it only means "one instruction per TB" and doesn't
-     * affect the code we generate.
-     */
-    return s->base.singlestep_enabled || s->ss_active;
-}
-
 /* is_jmp field values */
 #define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
 #define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *s, uint32_t dest, int nr)
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_singlestep_exception(DisasContext *s)
-{
-    /*
-     * Generate the right kind of exception for singlestep, which is
-     * either the architectural singlestep or EXCP_DEBUG for QEMU's
-     * gdb singlestepping.
-     */
-    if (s->ss_active) {
-        gen_raise_exception(EXCP_TRACE);
-    } else {
-        gen_raise_exception(EXCP_DEBUG);
-    }
-}
-
 static inline void gen_addr_fault(DisasContext *s)
 {
     gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
@@ -XXX,XX +XXX,XX @@ static void gen_exit_tb(DisasContext *s)
 /* Generate a jump to an immediate address.  */
 static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
 {
-    if (unlikely(is_singlestepping(s))) {
+    if (unlikely(s->ss_active)) {
         update_cc_op(s);
         tcg_gen_movi_i32(QREG_PC, dest);
-        gen_singlestep_exception(s);
+        gen_raise_exception(EXCP_TRACE);
     } else if (translator_use_goto_tb(&s->base, dest)) {
         tcg_gen_goto_tb(n);
         tcg_gen_movi_i32(QREG_PC, dest);
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
 
     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
     /* If architectural single step active, limit to 1 */
-    if (is_singlestepping(dc)) {
+    if (dc->ss_active) {
         dc->base.max_insns = 1;
     }
 }
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         break;
     case DISAS_TOO_MANY:
         update_cc_op(dc);
-        if (is_singlestepping(dc)) {
+        if (dc->ss_active) {
             tcg_gen_movi_i32(QREG_PC, dc->pc);
-            gen_singlestep_exception(dc);
+            gen_raise_exception(EXCP_TRACE);
         } else {
             gen_jmp_tb(dc, 0, dc->pc);
         }
         break;
     case DISAS_JUMP:
         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
-        if (is_singlestepping(dc)) {
-            gen_singlestep_exception(dc);
+        if (dc->ss_active) {
+            gen_raise_exception(EXCP_TRACE);
         } else {
             tcg_gen_lookup_and_goto_ptr();
         }
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          * We updated CC_OP and PC in gen_exit_tb, but also modified
          * other state that may require returning to the main loop.
          */
-        if (is_singlestepping(dc)) {
-            gen_singlestep_exception(dc);
+        if (dc->ss_active) {
+            gen_raise_exception(EXCP_TRACE);
         } else {
             tcg_gen_exit_tb(NULL, 0);
         }
--
2.25.1

New series:

We now have the option to generate direct or indirect
goto_tb depending on the dynamic displacement, thus
the define is no longer necessary or completely accurate.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.h     |  1 -
 tcg/arm/tcg-target.h         |  1 -
 tcg/i386/tcg-target.h        |  1 -
 tcg/loongarch64/tcg-target.h |  1 -
 tcg/mips/tcg-target.h        |  1 -
 tcg/ppc/tcg-target.h         |  1 -
 tcg/riscv/tcg-target.h       |  1 -
 tcg/s390x/tcg-target.h       |  1 -
 tcg/sparc64/tcg-target.h     |  1 -
 tcg/tci/tcg-target.h         |  1 -
 accel/tcg/cpu-exec.c         | 23 +++++++++++------------
 tcg/tcg.c                    |  1 -
 tcg/arm/tcg-target.c.inc     |  1 -
 tcg/mips/tcg-target.c.inc    |  1 -
 tcg/riscv/tcg-target.c.inc   |  1 -
 tcg/s390x/tcg-target.c.inc   |  3 +++
 tcg/tci/tcg-target.c.inc     |  1 -
 17 files changed, 14 insertions(+), 27 deletions(-)

diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i64        0
 #define TCG_TARGET_HAS_muluh_i64        1
 #define TCG_TARGET_HAS_mulsh_i64        1
-#define TCG_TARGET_HAS_direct_jump      1
 
 #define TCG_TARGET_HAS_v64              1
 #define TCG_TARGET_HAS_v128             1
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
 #define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_div_i32          use_idiv_instructions
 #define TCG_TARGET_HAS_rem_i32          0
-#define TCG_TARGET_HAS_direct_jump      0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #define TCG_TARGET_HAS_v64              use_neon_instructions
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
 #define TCG_TARGET_HAS_muls2_i32        1
 #define TCG_TARGET_HAS_muluh_i32        0
 #define TCG_TARGET_HAS_mulsh_i32        0
-#define TCG_TARGET_HAS_direct_jump      1
 
 #if TCG_TARGET_REG_BITS == 64
 /* Keep target addresses zero-extended in a register.  */
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        0
-#define TCG_TARGET_HAS_direct_jump      1
 #define TCG_TARGET_HAS_brcond2          0
 #define TCG_TARGET_HAS_setcond2         0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
 #define TCG_TARGET_HAS_muluh_i32        1
 #define TCG_TARGET_HAS_mulsh_i32        1
 #define TCG_TARGET_HAS_bswap32_i32      1
-#define TCG_TARGET_HAS_direct_jump      0
 
 #if TCG_TARGET_REG_BITS == 64
 #define TCG_TARGET_HAS_add2_i32         0
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
 #define TCG_TARGET_HAS_muls2_i32        0
 #define TCG_TARGET_HAS_muluh_i32        1
 #define TCG_TARGET_HAS_mulsh_i32        1
-#define TCG_TARGET_HAS_direct_jump      1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #if TCG_TARGET_REG_BITS == 64
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_clz_i32          0
 #define TCG_TARGET_HAS_ctz_i32          0
 #define TCG_TARGET_HAS_ctpop_i32        0
-#define TCG_TARGET_HAS_direct_jump      0
 #define TCG_TARGET_HAS_brcond2          1
 #define TCG_TARGET_HAS_setcond2         1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
 #define TCG_TARGET_HAS_mulsh_i32      0
 #define TCG_TARGET_HAS_extrl_i64_i32  0
 #define TCG_TARGET_HAS_extrh_i64_i32  0
-#define TCG_TARGET_HAS_direct_jump    1
 #define TCG_TARGET_HAS_qemu_st8_i32   0
 
 #define TCG_TARGET_HAS_div2_i64       1
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
 #define TCG_TARGET_HAS_muls2_i32        1
 #define TCG_TARGET_HAS_muluh_i32        0
 #define TCG_TARGET_HAS_mulsh_i32        0
-#define TCG_TARGET_HAS_direct_jump      1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #define TCG_TARGET_HAS_extrl_i64_i32    1
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@
 #define TCG_TARGET_HAS_muls2_i32        1
 #define TCG_TARGET_HAS_muluh_i32        0
 #define TCG_TARGET_HAS_mulsh_i32        0
-#define TCG_TARGET_HAS_direct_jump      0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #if TCG_TARGET_REG_BITS == 64
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
 
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
 {
+    /*
+     * Get the rx view of the structure, from which we find the
+     * executable code address, and tb_target_set_jmp_target can
+     * produce a pc-relative displacement to jmp_target_addr[n].
+     */
+    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
+    uintptr_t offset = tb->jmp_insn_offset[n];
+    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
+    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
+
     tb->jmp_target_addr[n] = addr;
-    if (TCG_TARGET_HAS_direct_jump) {
-        /*
-         * Get the rx view of the structure, from which we find the
-         * executable code address, and tb_target_set_jmp_target can
-         * produce a pc-relative displacement to jmp_target_addr[n].
-         */
-        const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
-        uintptr_t offset = tb->jmp_insn_offset[n];
-        uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
-        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
-        tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
-    }
+    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
 }
 
 static inline void tb_add_jump(TranslationBlock *tb, int n,
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
      * We will check for overflow at the end of the opcode loop in
      * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
      */
-    tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
     s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
 }
 
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     intptr_t ptr, dif, dil;
     TCGReg base = TCG_REG_PC;
 
-    qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
     ptr = get_jmp_target_addr(s, which);
     dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
     dil = sextract32(dif, 0, 12);
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
     /* indirect jump method */
-    qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
                get_jmp_target_addr(s, which));
     tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
     /* indirect jump method */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                get_jmp_target_addr(s, which));
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
+    if (!HAVE_FACILITY(GEN_INST_EXT)) {
+        return;
+    }
     /* patch the branch destination */
     uintptr_t addr = tb->jmp_target_addr[n];
     intptr_t disp = addr - (jmp_rx - 2);
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
     /* indirect jump method. */
     tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
     set_jmp_reset_offset(s, which);
--
2.34.1
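The cpu-exec.c hunk above hoists the rx/rw computation out of the old TCG_TARGET_HAS_direct_jump conditional, so every backend now gets both views of the jump site. The arithmetic is simple: with split-wx code buffers the same bytes are mapped twice, a constant distance apart. A standalone restatement, where splitwx_diff is an assumed parameter standing in for QEMU's tcg_splitwx_diff:

    #include <stdint.h>

    static void jmp_site_views(uintptr_t tc_ptr, uintptr_t insn_offset,
                               intptr_t splitwx_diff,
                               uintptr_t *jmp_rx, uintptr_t *jmp_rw)
    {
        *jmp_rx = tc_ptr + insn_offset;    /* address the CPU executes */
        *jmp_rw = *jmp_rx - splitwx_diff;  /* aliased writable address */
    }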
Old series:

GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-a64.c | 10 ++--------
 target/arm/translate.c     | 36 ++++++------------------------------
 2 files changed, 8 insertions(+), 38 deletions(-)

diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
         gen_a64_set_pc_im(dest);
         if (s->ss_active) {
             gen_step_complete_exception(s);
-        } else if (s->base.singlestep_enabled) {
-            gen_exception_internal(EXCP_DEBUG);
         } else {
             tcg_gen_lookup_and_goto_ptr();
             s->base.is_jmp = DISAS_NORETURN;
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
 {
     DisasContext *dc = container_of(dcbase, DisasContext, base);
 
-    if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
+    if (unlikely(dc->ss_active)) {
         /* Note that this means single stepping WFI doesn't halt the CPU.
          * For conditional branch insns this is harmless unreachable code as
          * gen_goto_tb() has already handled emitting the debug exception
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
             /* fall through */
         case DISAS_EXIT:
         case DISAS_JUMP:
-            if (dc->base.singlestep_enabled) {
-                gen_exception_internal(EXCP_DEBUG);
-            } else {
-                gen_step_complete_exception(dc);
-            }
+            gen_step_complete_exception(dc);
             break;
         case DISAS_NORETURN:
             break;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
     tcg_temp_free_i32(tcg_excp);
 }
 
-static void gen_step_complete_exception(DisasContext *s)
+static void gen_singlestep_exception(DisasContext *s)
 {
     /* We just completed step of an insn. Move from Active-not-pending
      * to Active-pending, and then also take the swstep exception.
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
     s->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_singlestep_exception(DisasContext *s)
-{
-    /* Generate the right kind of exception for singlestep, which is
-     * either the architectural singlestep or EXCP_DEBUG for QEMU's
-     * gdb singlestepping.
-     */
-    if (s->ss_active) {
-        gen_step_complete_exception(s);
-    } else {
-        gen_exception_internal(EXCP_DEBUG);
-    }
-}
-
-static inline bool is_singlestepping(DisasContext *s)
-{
-    /* Return true if we are singlestepping either because of
-     * architectural singlestep or QEMU gdbstub singlestep. This does
-     * not include the command line '-singlestep' mode which is rather
-     * misnamed as it only means "one instruction per TB" and doesn't
-     * affect the code we generate.
-     */
-    return s->base.singlestep_enabled || s->ss_active;
-}
-
 void clear_eci_state(DisasContext *s)
 {
     /*
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx_excret_final_code(DisasContext *s)
     /* Is the new PC value in the magic range indicating exception return? */
     tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
     /* No: end the TB as we would for a DISAS_JMP */
-    if (is_singlestepping(s)) {
+    if (s->ss_active) {
         gen_singlestep_exception(s);
     } else {
         tcg_gen_exit_tb(NULL, 0);
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
 /* Jump, specifying which TB number to use if we gen_goto_tb() */
 static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
 {
-    if (unlikely(is_singlestepping(s))) {
+    if (unlikely(s->ss_active)) {
         /* An indirect jump so that we still trigger the debug exception.  */
         gen_set_pc_im(s, dest);
         s->base.is_jmp = DISAS_JUMP;
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
 
     /* If architectural single step active, limit to 1.  */
-    if (is_singlestepping(dc)) {
+    if (dc->ss_active) {
         dc->base.max_insns = 1;
     }
 
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
          * insn codepath itself.
          */
         gen_bx_excret_final_code(dc);
-    } else if (unlikely(is_singlestepping(dc))) {
+    } else if (unlikely(dc->ss_active)) {
         /* Unconditional and "condition passed" instruction codepath. */
         switch (dc->base.is_jmp) {
         case DISAS_SWI:
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
         /* "Condition failed" instruction codepath for the branch/trap insn */
         gen_set_label(dc->condlabel);
         gen_set_condexec(dc);
-        if (unlikely(is_singlestepping(dc))) {
+        if (unlikely(dc->ss_active)) {
             gen_set_pc_im(dc, dc->base.pc_next);
             gen_singlestep_exception(dc);
         } else {
--
2.25.1

New series:

The old implementation replaces two insns, swapping between

	b	<dest>
	nop
	br	x30
and
	adrp	x30, <dest>
	addi	x30, x30, lo12:<dest>
	br	x30

There is a race condition in which a thread could be stopped at
the PC of the second insn, and when restarted does not see the
complete address computation and branches to nowhere.

The new implementation replaces only one insn, swapping between

	b	<dest>
	br	tmp
and
	ldr	tmp, <jmp_addr>
	br	tmp

Reported-by: hev <r@hev.cc>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.h     |  2 +-
 tcg/aarch64/tcg-target.c.inc | 66 +++++++++++++++---------------------
 2 files changed, 29 insertions(+), 39 deletions(-)

diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@
 
 #define TCG_TARGET_INSN_UNIT_SIZE  4
 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 24
-#define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
+#define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
 
 typedef enum {
     TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
     tcg_out_call_int(s, target);
 }
 
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t jmp_rx, uintptr_t jmp_rw)
-{
-    uintptr_t addr = tb->jmp_target_addr[n];
-    tcg_insn_unit i1, i2;
-    TCGType rt = TCG_TYPE_I64;
-    TCGReg  rd = TCG_REG_TMP;
-    uint64_t pair;
-
-    ptrdiff_t offset = addr - jmp_rx;
-
-    if (offset == sextract64(offset, 0, 26)) {
-        i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
-        i2 = NOP;
-    } else {
-        offset = (addr >> 12) - (jmp_rx >> 12);
-
-        /* patch ADRP */
-        i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
-        /* patch ADDI */
-        i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
-    }
-    pair = (uint64_t)i2 << 32 | i1;
-    qatomic_set((uint64_t *)jmp_rw, pair);
-    flush_idcache_range(jmp_rx, jmp_rw, 8);
-}
-
 static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
 {
     if (!l->has_value) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
     /*
-     * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
-     * write can be used to patch the target address.
+     * Direct branch, or indirect address load, will be patched
+     * by tb_target_set_jmp_target.  Assert indirect load offset
+     * in range early, regardless of direct branch distance.
      */
-    if ((uintptr_t)s->code_ptr & 7) {
-        tcg_out32(s, NOP);
-    }
+    intptr_t i_off = tcg_pcrel_diff(s, (void *)get_jmp_target_addr(s, which));
+    tcg_debug_assert(i_off == sextract64(i_off, 0, 21));
+
     set_jmp_insn_offset(s, which);
-    /*
-     * actual branch destination will be patched by
-     * tb_target_set_jmp_target later
-     */
-    tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
-    tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
+    tcg_out32(s, I3206_B);
     tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
     set_jmp_reset_offset(s, which);
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    uintptr_t d_addr = tb->jmp_target_addr[n];
+    ptrdiff_t d_offset = d_addr - jmp_rx;
+    tcg_insn_unit insn;
+
+    /* Either directly branch, or indirect branch load. */
+    if (d_offset == sextract64(d_offset, 0, 28)) {
+        insn = deposit32(I3206_B, 0, 26, d_offset >> 2);
+    } else {
+        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
+        ptrdiff_t i_offset = i_addr - jmp_rx;
+
+        /* Note that we asserted this in range in tcg_out_goto_tb. */
+        insn = deposit32(I3305_LDR | TCG_REG_TMP, 5, 19, i_offset >> 2);
+    }
+    qatomic_set((uint32_t *)jmp_rw, insn);
+    flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
--
2.34.1
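The key property of the new aarch64 scheme is that only a single, naturally aligned 4-byte instruction is ever rewritten, so a concurrent thread observes either the complete old insn or the complete new one, never a torn pair. The standalone model below illustrates just that store; the encoding macro and function names are hypothetical, and a real backend must still flush the icache line afterwards as the patch above does.

    #include <stdatomic.h>
    #include <stdint.h>

    /* AArch64 B: opcode 0b000101 in the top six bits, imm26 in the rest. */
    #define AARCH64_B(disp_insns) \
        (0x14000000u | ((uint32_t)(disp_insns) & 0x03ffffffu))

    static void retarget_direct(uint32_t *insn_rw, uintptr_t insn_rx,
                                uintptr_t dest)
    {
        ptrdiff_t off = ((intptr_t)dest - (intptr_t)insn_rx) >> 2;
        /* one aligned 32-bit store: no intermediate state is visible */
        atomic_store_explicit((_Atomic uint32_t *)insn_rw, AARCH64_B(off),
                              memory_order_relaxed);
    }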
Old series:

GDB single-stepping is now handled generically, which means
we don't need to do anything in the wrappers.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/translate.c                        | 27 +------------------
 .../riscv/insn_trans/trans_privileged.c.inc     |  4 +--
 target/riscv/insn_trans/trans_rvi.c.inc         |  8 +++---
 target/riscv/insn_trans/trans_rvv.c.inc         |  2 +-
 4 files changed, 7 insertions(+), 34 deletions(-)

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
     ctx->base.is_jmp = DISAS_NORETURN;
 }
 
-static void gen_exception_debug(void)
-{
-    gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
-}
-
-/* Wrapper around tcg_gen_exit_tb that handles single stepping */
-static void exit_tb(DisasContext *ctx)
-{
-    if (ctx->base.singlestep_enabled) {
-        gen_exception_debug();
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
-}
-
-/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
-static void lookup_and_goto_ptr(DisasContext *ctx)
-{
-    if (ctx->base.singlestep_enabled) {
-        gen_exception_debug();
-    } else {
-        tcg_gen_lookup_and_goto_ptr();
-    }
-}
-
 static void gen_exception_illegal(DisasContext *ctx)
 {
     generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         tcg_gen_movi_tl(cpu_pc, dest);
-        lookup_and_goto_ptr(ctx);
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
 
     if (has_ext(ctx, RVS)) {
         gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
-        exit_tb(ctx); /* no chaining */
+        tcg_gen_exit_tb(NULL, 0); /* no chaining */
         ctx->base.is_jmp = DISAS_NORETURN;
     } else {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
 #ifndef CONFIG_USER_ONLY
     tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
     gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
-    exit_tb(ctx); /* no chaining */
+    tcg_gen_exit_tb(NULL, 0); /* no chaining */
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 #else
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
     if (a->rd != 0) {
         tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
     }
-
-    /* No chaining with JALR. */
-    lookup_and_goto_ptr(ctx);
+    tcg_gen_lookup_and_goto_ptr();
 
     if (misaligned) {
         gen_set_label(misaligned);
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
      * however we need to end the translation block
      */
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
-    exit_tb(ctx);
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
@@ -XXX,XX +XXX,XX @@ static bool do_csr_post(DisasContext *ctx)
 {
     /* We may have changed important cpu state -- exit to main loop. */
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
-    exit_tb(ctx);
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
     gen_set_gpr(ctx, a->rd, dst);
 
     tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
-    lookup_and_goto_ptr(ctx);
+    tcg_gen_lookup_and_goto_ptr();
     ctx->base.is_jmp = DISAS_NORETURN;
     return true;
 }
--
2.25.1

New series:

The old ppc64 implementation replaces 2 or 4 insns, which leaves a race
condition in which a thread could be stopped at a PC in the middle of
the sequence, and when restarted does not see the complete address
computation and branches to nowhere.

The new implementation replaces only one insn, swapping between

	b <dest>
and
	mtctr	r31

falling through to a general-case indirect branch.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.h     |   3 +-
 tcg/ppc/tcg-target.c.inc | 158 +++++++++++----------------------------
 2 files changed, 44 insertions(+), 117 deletions(-)

diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@
 
 #ifdef _ARCH_PPC64
 # define TCG_TARGET_REG_BITS  64
-# define MAX_CODE_GEN_BUFFER_SIZE  (2 * GiB)
 #else
 # define TCG_TARGET_REG_BITS  32
-# define MAX_CODE_GEN_BUFFER_SIZE  (32 * MiB)
 #endif
+#define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
 
 #define TCG_TARGET_NB_REGS 64
 #define TCG_TARGET_INSN_UNIT_SIZE 4
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
     tcg_out32(s, insn);
 }
 
-static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
-{
-    if (HOST_BIG_ENDIAN) {
-        return (uint64_t)i1 << 32 | i2;
-    }
-    return (uint64_t)i2 << 32 | i1;
-}
-
-static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
-                                  tcg_insn_unit i0, tcg_insn_unit i1)
-{
-#if TCG_TARGET_REG_BITS == 64
-    qatomic_set((uint64_t *)rw, make_pair(i0, i1));
-    flush_idcache_range(rx, rw, 8);
-#else
-    qemu_build_not_reached();
-#endif
-}
-
-static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
-                                  tcg_insn_unit i0, tcg_insn_unit i1,
-                                  tcg_insn_unit i2, tcg_insn_unit i3)
-{
-    uint64_t p[2];
-
-    p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
-    p[HOST_BIG_ENDIAN] = make_pair(i2, i3);
-
-    /*
-     * There's no convenient way to get the compiler to allocate a pair
-     * of registers at an even index, so copy into r6/r7 and clobber.
-     */
-    asm("mr %%r6, %1\n\t"
-        "mr %%r7, %2\n\t"
-        "stq %%r6, %0"
-        : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
-    flush_idcache_range(rx, rw, 16);
-}
-
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t jmp_rx, uintptr_t jmp_rw)
-{
-    tcg_insn_unit i0, i1, i2, i3;
-    uintptr_t addr = tb->jmp_target_addr[n];
-    intptr_t tb_diff = addr - (uintptr_t)tb->tc.ptr;
-    intptr_t br_diff = addr - (jmp_rx + 4);
-    intptr_t lo, hi;
-
-    if (TCG_TARGET_REG_BITS == 32) {
-        intptr_t diff = addr - jmp_rx;
-        tcg_debug_assert(in_range_b(diff));
-        qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
-        flush_idcache_range(jmp_rx, jmp_rw, 4);
-        return;
-    }
-
-    /*
-     * For 16-bit displacements, we can use a single add + branch.
-     * This happens quite often.
-     */
-    if (tb_diff == (int16_t)tb_diff) {
-        i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
-        i1 = B | (br_diff & 0x3fffffc);
-        ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
-        return;
-    }
-
-    lo = (int16_t)tb_diff;
-    hi = (int32_t)(tb_diff - lo);
-    assert(tb_diff == hi + lo);
-    i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
-    i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
-
-    /*
-     * Without stq from 2.07, we can only update two insns,
-     * and those must be the ones that load the target address.
-     */
-    if (!have_isa_2_07) {
-        ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
-        return;
-    }
-
-    /*
-     * For 26-bit displacements, we can use a direct branch.
-     * Otherwise we still need the indirect branch, which we
-     * must restore after a potential direct branch write.
-     */
-    br_diff -= 4;
-    if (in_range_b(br_diff)) {
-        i2 = B | (br_diff & 0x3fffffc);
-        i3 = NOP;
-    } else {
-        i2 = MTSPR | RS(TCG_REG_TB) | CTR;
-        i3 = BCCTR | BO_ALWAYS;
-    }
-    ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
-}
-
 static void tcg_out_call_int(TCGContext *s, int lk,
                              const tcg_insn_unit *target)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    /* Direct jump. */
-    if (TCG_TARGET_REG_BITS == 64) {
-        /* Ensure the next insns are 8 or 16-byte aligned. */
-        while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
-            tcg_out32(s, NOP);
-        }
+    uintptr_t ptr = get_jmp_target_addr(s, which);
+
+    if (USE_REG_TB) {
+        ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
+        tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
+
+        /* Direct branch will be patched by tb_target_set_jmp_target. */
         set_jmp_insn_offset(s, which);
-        tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
-        tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
         tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
+
+        /* When branch is out of range, fall through to indirect. */
+        tcg_out32(s, BCCTR | BO_ALWAYS);
+
+        /* For the unlinked case, need to reset TCG_REG_TB. */
+        set_jmp_reset_offset(s, which);
+        tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
+                         -tcg_current_code_size(s));
+    } else {
+        /* Direct branch will be patched by tb_target_set_jmp_target. */
+        set_jmp_insn_offset(s, which);
+        tcg_out32(s, NOP);
+
+        /* When branch is out of range, fall through to indirect. */
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
+        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
+        tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
         tcg_out32(s, BCCTR | BO_ALWAYS);
         set_jmp_reset_offset(s, which);
-        if (USE_REG_TB) {
-            /* For the unlinked case, need to reset TCG_REG_TB. */
-            tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
-                             -tcg_current_code_size(s));
-        }
-    } else {
-        set_jmp_insn_offset(s, which);
-        tcg_out32(s, B);
-        set_jmp_reset_offset(s, which);
     }
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    uintptr_t addr = tb->jmp_target_addr[n];
+    intptr_t diff = addr - jmp_rx;
+    tcg_insn_unit insn;
+
+    if (in_range_b(diff)) {
+        insn = B | (diff & 0x3fffffc);
+    } else if (USE_REG_TB) {
+        insn = MTSPR | RS(TCG_REG_TB) | CTR;
+    } else {
+        insn = NOP;
+    }
+
+    qatomic_set((uint32_t *)jmp_rw, insn);
+    flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
--
2.34.1
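The ppc retarget above also rewrites exactly one instruction, and every value it may store leaves behind a complete, executable sequence: a direct branch when the target is in range, an mtctr that copies the already-loaded TCG_REG_TB into CTR and falls through to the pre-built bcctr, or a nop that falls through to the full indirect reload. The following is a reduced sketch of that three-way decision only, using symbolic values rather than real PPC opcodes:

    #include <stdbool.h>

    enum patch_choice { PATCH_B, PATCH_MTCTR_TB, PATCH_NOP };

    static enum patch_choice choose_patch(bool branch_in_range,
                                          bool use_reg_tb)
    {
        if (branch_in_range) {
            return PATCH_B;          /* jump straight to the new target */
        }
        return use_reg_tb ? PATCH_MTCTR_TB /* TCG_REG_TB holds the target */
                          : PATCH_NOP;     /* fall through to indirect path */
    }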
1
GDB single-stepping is now handled generically.
1
This is always true for sparc64, so this is dead since 3a5f6805c7ca.
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
target/hppa/translate.c | 17 ++++-------------
7
tcg/sparc64/tcg-target.c.inc | 62 ++++++++++++------------------------
7
1 file changed, 4 insertions(+), 13 deletions(-)
8
1 file changed, 21 insertions(+), 41 deletions(-)
8
9
9
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
10
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hppa/translate.c
12
--- a/tcg/sparc64/tcg-target.c.inc
12
+++ b/target/hppa/translate.c
13
+++ b/tcg/sparc64/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int which,
14
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
14
} else {
15
#endif
15
copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
16
16
copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
17
#define TCG_REG_TB TCG_REG_I1
17
- if (ctx->base.singlestep_enabled) {
18
-#define USE_REG_TB (sizeof(void *) > 4)
18
- gen_excp_1(EXCP_DEBUG);
19
20
static const int tcg_target_reg_alloc_order[] = {
21
TCG_REG_L0,
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
23
}
24
25
/* A 13-bit constant relative to the TB. */
26
- if (!in_prologue && USE_REG_TB) {
27
+ if (!in_prologue) {
28
test = tcg_tbrel_diff(s, (void *)arg);
29
if (check_fit_ptr(test, 13)) {
30
tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
31
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
32
}
33
34
/* Use the constant pool, if possible. */
35
- if (!in_prologue && USE_REG_TB) {
36
+ if (!in_prologue) {
37
new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
38
tcg_tbrel_diff(s, NULL));
39
tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
40
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
41
#endif
42
43
/* We choose TCG_REG_TB such that no move is required. */
44
- if (USE_REG_TB) {
45
- QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
46
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
47
- }
48
+ QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
49
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
50
51
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
52
/* delay slot */
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
54
         tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
         tcg_out_movi_imm13(s, TCG_REG_O0, a0);
         return;
-    } else if (USE_REG_TB) {
+    } else {
         intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
         if (check_fit_ptr(tb_diff, 13)) {
             tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
+    int c;
+
     /* Direct jump. */
-    if (USE_REG_TB) {
-        /* make sure the patch is 8-byte aligned.  */
-        if ((intptr_t)s->code_ptr & 4) {
-            tcg_out_nop(s);
-        }
-        set_jmp_insn_offset(s, which);
-        tcg_out_sethi(s, TCG_REG_T1, 0);
-        tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
-        tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
-        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
-    } else {
-        set_jmp_insn_offset(s, which);
-        tcg_out32(s, CALL);
+    /* make sure the patch is 8-byte aligned.  */
+    if ((intptr_t)s->code_ptr & 4) {
         tcg_out_nop(s);
     }
+    set_jmp_insn_offset(s, which);
+    tcg_out_sethi(s, TCG_REG_T1, 0);
+    tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
+    tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
+    tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
     set_jmp_reset_offset(s, which);
 
     /*
      * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
      * to the beginning of this TB.
      */
-    if (USE_REG_TB) {
-        int c = -tcg_current_code_size(s);
-        if (check_fit_i32(c, 13)) {
-            tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
-        } else {
-            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
-            tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
-        }
-    }
+    c = -tcg_current_code_size(s);
+    if (check_fit_i32(c, 13)) {
+        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
+        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+    }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_goto_ptr:
         tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
-        if (USE_REG_TB) {
-            tcg_out_mov_delay(s, TCG_REG_TB, a0);
-        } else {
-            tcg_out_nop(s);
-        }
+        tcg_out_mov_delay(s, TCG_REG_TB, a0);
         break;
     case INDEX_op_br:
         tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
     tcg_debug_assert(tb_disp == (int32_t)tb_disp);
     tcg_debug_assert(br_disp == (int32_t)br_disp);
 
-    if (!USE_REG_TB) {
-        qatomic_set((uint32_t *)jmp_rw,
-                    deposit32(CALL, 0, 30, br_disp >> 2));
-        flush_idcache_range(jmp_rx, jmp_rw, 4);
-        return;
-    }
-
     /* This does not exercise the range of the branch, but we do
        still need to be able to load the new value of TCG_REG_TB.
        But this does still happen quite often.  */
-- 
2.34.1

-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool do_rfi(DisasContext *ctx, bool rfi_r)
         gen_helper_rfi(cpu_env);
     }
     /* Exit the TB to recognize new interrupts.  */
-    if (ctx->base.singlestep_enabled) {
-        gen_excp_1(EXCP_DEBUG);
-    } else {
-        tcg_gen_exit_tb(NULL, 0);
-    }
+    tcg_gen_exit_tb(NULL, 0);
     ctx->base.is_jmp = DISAS_NORETURN;
 
     return nullify_end(ctx);
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         nullify_save(ctx);
         /* FALLTHRU */
     case DISAS_IAQ_N_UPDATED:
-        if (ctx->base.singlestep_enabled) {
-            gen_excp_1(EXCP_DEBUG);
-        } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
+        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
             tcg_gen_lookup_and_goto_ptr();
+            break;
         }
         /* FALLTHRU */
     case DISAS_EXIT:
-- 
2.25.1
Deleted patch

We were using singlestep_enabled as a proxy for whether
translator_use_goto_tb would always return false.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/tcg/translate.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUX86State *env = cpu->env_ptr;
     uint32_t flags = dc->base.tb->flags;
+    uint32_t cflags = tb_cflags(dc->base.tb);
     int cpl = (flags >> HF_CPL_SHIFT) & 3;
     int iopl = (flags >> IOPL_SHIFT) & 3;
 
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
     dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
     dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
     dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
-    dc->jmp_opt = !(dc->base.singlestep_enabled ||
+    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                     (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
     /*
      * If jmp_opt, we want to handle each string instruction individually.
      * For icount also disable repz optimization so that each iteration
      * is accounted separately.
      */
-    dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
+    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
 
     dc->T0 = tcg_temp_new();
     dc->T1 = tcg_temp_new();
-- 
2.25.1
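For illustration, a rough sketch of the idea the dropped patch encoded; the
flag values and structure here are simplified stand-ins, not QEMU's real
definitions.  Every condition that makes translator_use_goto_tb() return
false, including gdbstub single-stepping, is reflected in the CF_NO_GOTO_TB
cflag, so the translator can test that one bit instead of singlestep_enabled:

/*
 * Sketch only: simplified stand-ins for the i386 DisasContext fields
 * and TB cflags; the values are illustrative, not QEMU's definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define CF_NO_GOTO_TB  0x00000200u  /* block chaining disabled */
#define CF_USE_ICOUNT  0x00020000u  /* instruction counting active */

struct disas_flags {
    bool jmp_opt;   /* direct block chaining is allowed */
    bool repz_opt;  /* repz iterations may be batched */
};

static void init_disas_flags(struct disas_flags *dc, uint32_t cflags,
                             bool tf_or_inhibit_irq)
{
    /* Single-stepping is only one of the reasons CF_NO_GOTO_TB is set,
       so testing the cflag covers strictly more cases than testing
       singlestep_enabled did. */
    dc->jmp_opt = !(cflags & CF_NO_GOTO_TB) && !tf_or_inhibit_irq;

    /* With jmp_opt, string insns are handled one at a time; with icount,
       each repz iteration must also be accounted separately. */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
}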
GDB single-stepping is now handled generically.
Reuse gen_debug_exception to handle architectural debug exceptions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/translate.c | 38 ++++++++------------------------
 1 file changed, 8 insertions(+), 30 deletions(-)

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@
 
 #define CPU_SINGLE_STEP 0x1
 #define CPU_BRANCH_STEP 0x2
-#define GDBSTUB_SINGLE_STEP 0x4
 
 /* Include definitions for instructions classes and implementations flags */
 /* #define PPC_DEBUG_DISAS */
@@ -XXX,XX +XXX,XX @@ static uint32_t gen_prep_dbgex(DisasContext *ctx)
 
 static void gen_debug_exception(DisasContext *ctx)
 {
-    gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
+    gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
     ctx->base.is_jmp = DISAS_NORETURN;
 }
 
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
 
 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
 {
-    int sse = ctx->singlestep_enabled;
-    if (unlikely(sse)) {
-        if (sse & GDBSTUB_SINGLE_STEP) {
-            gen_debug_exception(ctx);
-        } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) {
-            gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
-        } else {
-            tcg_gen_exit_tb(NULL, 0);
-        }
+    if (unlikely(ctx->singlestep_enabled)) {
+        gen_debug_exception(ctx);
     } else {
         tcg_gen_lookup_and_goto_ptr();
     }
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->singlestep_enabled = 0;
     if ((hflags >> HFLAGS_SE) & 1) {
         ctx->singlestep_enabled |= CPU_SINGLE_STEP;
+        ctx->base.max_insns = 1;
     }
     if ((hflags >> HFLAGS_BE) & 1) {
         ctx->singlestep_enabled |= CPU_BRANCH_STEP;
     }
-    if (unlikely(ctx->base.singlestep_enabled)) {
-        ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
-    }
-
-    if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) {
-        ctx->base.max_insns = 1;
-    }
 }
 
 static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
     DisasJumpType is_jmp = ctx->base.is_jmp;
     target_ulong nip = ctx->base.pc_next;
-    int sse;
 
     if (is_jmp == DISAS_NORETURN) {
         /* We have already exited the TB. */
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     }
 
     /* Honor single stepping. */
-    sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP);
-    if (unlikely(sse)) {
+    if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
+        && (nip <= 0x100 || nip > 0xf00)) {
         switch (is_jmp) {
         case DISAS_TOO_MANY:
         case DISAS_EXIT_UPDATE:
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
             g_assert_not_reached();
         }
 
-        if (sse & GDBSTUB_SINGLE_STEP) {
-            gen_debug_exception(ctx);
-            return;
-        }
-        /* else CPU_SINGLE_STEP... */
-        if (nip <= 0x100 || nip > 0xf00) {
-            gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
-            return;
-        }
+        gen_debug_exception(ctx);
+        return;
     }
 
     switch (is_jmp) {
-- 
2.25.1

The old sparc64 implementation may replace two insns, which leaves
a race condition in which a thread could be stopped at a PC in the
middle of the sequence, and when restarted does not see the complete
address computation and branches to nowhere.

The new implementation replaces only one insn, swapping between a
direct branch and a direct call.  The TCG_REG_TB register is loaded
from tb->jmp_target_addr[] in the delay slot.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.c.inc | 87 +++++++++++++++---------------------
 1 file changed, 37 insertions(+), 50 deletions(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    int c;
+    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
 
-    /* Direct jump. */
-    /* make sure the patch is 8-byte aligned.  */
-    if ((intptr_t)s->code_ptr & 4) {
-        tcg_out_nop(s);
-    }
+    /* Direct branch will be patched by tb_target_set_jmp_target. */
     set_jmp_insn_offset(s, which);
-    tcg_out_sethi(s, TCG_REG_T1, 0);
-    tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
-    tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
-    tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+    tcg_out32(s, CALL);
+    /* delay slot */
+    tcg_debug_assert(check_fit_ptr(off, 13));
+    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
     set_jmp_reset_offset(s, which);
 
     /*
      * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
      * to the beginning of this TB.
      */
-    c = -tcg_current_code_size(s);
-    if (check_fit_i32(c, 13)) {
-        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
+    off = -tcg_current_code_size(s);
+    if (check_fit_i32(off, 13)) {
+        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
     } else {
-        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
+        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
         tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
     }
 }
 
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
+                              uintptr_t jmp_rx, uintptr_t jmp_rw)
+{
+    uintptr_t addr = tb->jmp_target_addr[n];
+    intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2;
+    tcg_insn_unit insn;
+
+    br_disp >>= 2;
+    if (check_fit_ptr(br_disp, 19)) {
+        /* ba,pt %icc, addr */
+        insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
+                         | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
+    } else if (check_fit_ptr(br_disp, 22)) {
+        /* ba addr */
+        insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
+                         0, 22, br_disp);
+    } else {
+        /* The code_gen_buffer can't be larger than 2GB. */
+        tcg_debug_assert(check_fit_ptr(br_disp, 30));
+        /* call addr */
+        insn = deposit32(CALL, 0, 30, br_disp);
+    }
+
+    qatomic_set((uint32_t *)jmp_rw, insn);
+    flush_idcache_range(jmp_rx, jmp_rw, 4);
+}
+
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
                        const int const_args[TCG_MAX_OP_ARGS])
@@ -XXX,XX +XXX,XX @@ void tcg_register_jit(const void *buf, size_t buf_size)
 {
     tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
 }
-
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
-                              uintptr_t jmp_rx, uintptr_t jmp_rw)
-{
-    uintptr_t addr = tb->jmp_target_addr[n];
-    intptr_t tb_disp = addr - (uintptr_t)tb->tc.ptr;
-    intptr_t br_disp = addr - jmp_rx;
-    tcg_insn_unit i1, i2;
-
-    /* We can reach the entire address space for ILP32.
-       For LP64, the code_gen_buffer can't be larger than 2GB. */
-    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
-    tcg_debug_assert(br_disp == (int32_t)br_disp);
-
-    /* This does not exercise the range of the branch, but we do
-       still need to be able to load the new value of TCG_REG_TB.
-       But this does still happen quite often.  */
-    if (check_fit_ptr(tb_disp, 13)) {
-        /* ba,pt %icc, addr */
-        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
-              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
-        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
-              | INSN_IMM13(tb_disp));
-    } else if (tb_disp >= 0) {
-        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
-        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
-              | INSN_IMM13(tb_disp & 0x3ff));
-    } else {
-        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
-        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
-              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
-    }
-
-    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
-    flush_idcache_range(jmp_rx, jmp_rw, 8);
-}
-- 
2.34.1
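The heart of the race fix above is that retargeting a goto_tb site must be a
single aligned 32-bit store.  A minimal sketch of that invariant follows; it
uses generic GCC builtins as stand-ins for QEMU's qatomic helpers and is
illustrative, not the QEMU implementation:

/*
 * Sketch of the patching invariant, not QEMU code.  Another thread may
 * execute the goto_tb site at any instant, so the only safe update is
 * one aligned 32-bit store; __atomic_store_n stands in for qatomic_set.
 */
#include <stdint.h>

typedef uint32_t tcg_insn_unit;

static void retarget_branch(tcg_insn_unit *jmp_rw, tcg_insn_unit insn)
{
    /* A concurrent reader observes either the old insn or the new one,
       never a mixture. */
    __atomic_store_n(jmp_rw, insn, __ATOMIC_RELAXED);
    /* Icache maintenance on the executable mapping would follow here. */
}

/*
 * The old sparc64 code instead rewrote a pair of insns (an address
 * computation split across two words).  A thread stopped between the two
 * stores could resume with one old and one new half and branch to a bogus
 * address -- exactly the race the reorg removes by keeping the address in
 * tb->jmp_target_addr[] and loading it in the delay slot.
 */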
As per an ancient comment in mips_tr_translate_insn about the
expectations of gdb, when restarting the insn in a delay slot
we also re-execute the branch.  Which means that we are
expected to execute two insns in this case.

This has been broken since 8b86d6d2580, where we forced max_insns
to 1 while single-stepping.  This resulted in an exit from the
translator loop after the branch but before the delay slot is
translated.

Increase the max_insns to 2 for this case.  In addition, bypass
the end-of-page check, for when the branch itself ends the page.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 |
                                   INSN_LOONGSON3A)) ? MO_UNALN : MO_ALIGN;
 
+    /*
+     * Execute a branch and its delay slot as a single instruction.
+     * This is what GDB expects and is consistent with what the
+     * hardware does (e.g. if a delay slot instruction faults, the
+     * reported PC is the PC of the branch).
+     */
+    if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+        ctx->base.max_insns = 2;
+    }
+
     LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
               ctx->hflags);
 }
@@ -XXX,XX +XXX,XX @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     if (ctx->base.is_jmp != DISAS_NEXT) {
         return;
     }
+
     /*
-     * Execute a branch and its delay slot as a single instruction.
-     * This is what GDB expects and is consistent with what the
-     * hardware does (e.g. if a delay slot instruction faults, the
-     * reported PC is the PC of the branch).
+     * End the TB on (most) page crossings.
+     * See mips_tr_init_disas_context about single-stepping a branch
+     * together with its delay slot.
      */
-    if (ctx->base.singlestep_enabled &&
-        (ctx->hflags & MIPS_HFLAG_BMASK) == 0) {
-        ctx->base.is_jmp = DISAS_TOO_MANY;
-    }
-    if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) {
+    if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
+        && !ctx->base.singlestep_enabled) {
         ctx->base.is_jmp = DISAS_TOO_MANY;
     }
 }
-- 
2.25.1

Now that tcg can handle direct and indirect goto_tb
simultaneously, we can optimistically leave space for
a direct branch and fall back to loading the pointer
from the TB for an indirect branch.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 52 ++++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 15 deletions(-)

diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     ARITH_BIC = 0xe << 21,
     ARITH_MVN = 0xf << 21,
 
+    INSN_B = 0x0a000000,
+
     INSN_CLZ = 0x016f0f10,
     INSN_RBIT = 0x06ff0f30,
 
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
 
 static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
 {
-    tcg_out32(s, (cond << 28) | 0x0a000000 |
+    tcg_out32(s, (cond << 28) | INSN_B |
               (((offset - 8) >> 2) & 0x00ffffff));
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    /* Indirect jump method */
-    intptr_t ptr, dif, dil;
-    TCGReg base = TCG_REG_PC;
+    uintptr_t i_addr;
+    intptr_t i_disp;
 
-    ptr = get_jmp_target_addr(s, which);
-    dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
-    dil = sextract32(dif, 0, 12);
-    if (dif != dil) {
+    /* Direct branch will be patched by tb_target_set_jmp_target. */
+    set_jmp_insn_offset(s, which);
+    tcg_out32(s, INSN_NOP);
+
+    /* When branch is out of range, fall through to indirect. */
+    i_addr = get_jmp_target_addr(s, which);
+    i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
+    tcg_debug_assert(i_disp < 0);
+    if (i_disp >= -0xfff) {
+        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
+    } else {
         /*
          * The TB is close, but outside the 12 bits addressable by
          * the load.  We can extend this to 20 bits with a sub of a
-         * shifted immediate from pc.  In the vastly unlikely event
-         * the code requires more than 1MB, we'll use 2 insns and
-         * be no worse off.
+         * shifted immediate from pc.
          */
-        base = TCG_REG_R0;
-        tcg_out_movi32(s, COND_AL, base, ptr - dil);
+        int h = -i_disp;
+        int l = h & 0xfff;
+
+        h = encode_imm_nofail(h - l);
+        tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
+        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
     }
-    tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
     set_jmp_reset_offset(s, which);
 }
 
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
-    /* Always indirect, nothing to do */
+    uintptr_t addr = tb->jmp_target_addr[n];
+    ptrdiff_t offset = addr - (jmp_rx + 8);
+    tcg_insn_unit insn;
+
+    /* Either directly branch, or fall through to indirect branch. */
+    if (offset == sextract64(offset, 0, 26)) {
+        /* B <addr> */
+        insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
+    } else {
+        insn = INSN_NOP;
+    }
+
+    qatomic_set((uint32_t *)jmp_rw, insn);
+    flush_idcache_range(jmp_rx, jmp_rw, 4);
 }
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
-- 
2.34.1
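To make the "direct branch with indirect fallback" scheme above concrete,
here is a rough sketch of the retargeting decision.  The B encoding follows
the patch; INSN_NOP_SKETCH is an illustrative ARM nop, not necessarily the
value tcg/arm uses, and this is not the QEMU implementation:

/* Sketch: pick the insn to patch into an arm goto_tb site.  B reaches
   about +/-32MB; otherwise keep the nop so that execution falls through
   to the literal load of tb->jmp_target_addr[] and the indirect jump. */
#include <stdint.h>

#define COND_AL          0xeu
#define INSN_B           0x0a000000u
#define INSN_NOP_SKETCH  0xe1a00000u   /* mov r0, r0; illustrative only */

static uint32_t arm_goto_tb_insn(uintptr_t target, uintptr_t site_rx)
{
    /* B encodes a signed 26-bit byte offset relative to pc = site + 8. */
    intptr_t offset = (intptr_t)target - (intptr_t)(site_rx + 8);

    if (offset >= -(1 << 25) && offset < (1 << 25)) {
        uint32_t imm24 = ((uint32_t)(offset >> 2)) & 0x00ffffffu;
        return (COND_AL << 28) | INSN_B | imm24;
    }
    return INSN_NOP_SKETCH;   /* out of range: take the indirect path */
}

As with the sparc64 reorg, the returned word would be installed with a
single atomic 32-bit store followed by icache maintenance.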
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/tricore/helper.h    |  1 -
 target/tricore/op_helper.c |  7 -------
 target/tricore/translate.c | 14 +-------------
 3 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/target/tricore/helper.h b/target/tricore/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/helper.h
+++ b/target/tricore/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(psw_write, void, env, i32)
 DEF_HELPER_1(psw_read, i32, env)
 /* Exceptions */
 DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32)
-DEF_HELPER_2(qemu_excp, noreturn, env, i32)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -XXX,XX +XXX,XX @@ static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class,
     raise_exception_sync_internal(env, class, tin, pc, 0);
 }
 
-void helper_qemu_excp(CPUTriCoreState *env, uint32_t excp)
-{
-    CPUState *cs = env_cpu(env);
-    cs->exception_index = excp;
-    cpu_loop_exit(cs);
-}
-
 /* Addressing mode helper */
 
 static uint16_t reverse16(uint16_t val)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_pc(target_ulong pc)
     tcg_gen_movi_tl(cpu_PC, pc);
 }
 
-static void generate_qemu_excp(DisasContext *ctx, int excp)
-{
-    TCGv_i32 tmp = tcg_const_i32(excp);
-    gen_helper_qemu_excp(cpu_env, tmp);
-    ctx->base.is_jmp = DISAS_NORETURN;
-    tcg_temp_free(tmp);
-}
-
 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
 {
     if (translator_use_goto_tb(&ctx->base, dest)) {
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
         tcg_gen_exit_tb(ctx->base.tb, n);
     } else {
         gen_save_pc(dest);
-        if (ctx->base.singlestep_enabled) {
-            generate_qemu_excp(ctx, EXCP_DEBUG);
-        } else {
-            tcg_gen_lookup_and_goto_ptr();
-        }
+        tcg_gen_lookup_and_goto_ptr();
     }
 }
 
-- 
2.25.1

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #endif
 
     OPC_FENCE = 0x0000000f,
+    OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
 } RISCVInsn;
 
 /*
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
 {
     int i;
     for (i = 0; i < count; ++i) {
-        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
+        p[i] = OPC_NOP;
     }
 }
-- 
2.34.1
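The OPC_NOP alias works because the canonical RISC-V nop is addi x0, x0, 0,
and in the I-type encoding every field of that instruction is zero, leaving
only the OP-IMM opcode.  A quick sketch, where encode_i is a simplified
stand-in for the tcg/riscv helper (funct3 omitted, since it is 0 for ADDI):

/* Sketch: why the bare ADDI opcode is already a complete nop. */
#include <assert.h>
#include <stdint.h>

#define OPC_ADDI 0x00000013u   /* OP-IMM major opcode, funct3 = 0 */

static uint32_t encode_i(uint32_t opc, uint32_t rd, uint32_t rs1, uint32_t imm)
{
    return opc | (rd << 7) | (rs1 << 15) | (imm << 20);
}

int main(void)
{
    /* addi x0, x0, 0: rd = rs1 = imm = 0, so only the opcode remains. */
    assert(encode_i(OPC_ADDI, 0, 0, 0) == OPC_ADDI);
    return 0;
}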
GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/helper.h          | 1 -
 target/i386/tcg/misc_helper.c | 8 --------
 target/i386/tcg/translate.c   | 4 +---
 3 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/target/i386/helper.h b/target/i386/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(syscall, void, env, int)
 DEF_HELPER_2(sysret, void, env, int)
 #endif
 DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
-DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env)
 DEF_HELPER_1(reset_rf, void, env)
 DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
 DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/misc_helper.c
+++ b/target/i386/tcg/misc_helper.c
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend)
     do_pause(env);
 }
 
-void QEMU_NORETURN helper_debug(CPUX86State *env)
-{
-    CPUState *cs = env_cpu(env);
-
-    cs->exception_index = EXCP_DEBUG;
-    cpu_loop_exit(cs);
-}
-
 uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
 {
     if ((env->cr[4] & CR4_PKE_MASK) == 0) {
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
     if (s->base.tb->flags & HF_RF_MASK) {
         gen_helper_reset_rf(cpu_env);
     }
-    if (s->base.singlestep_enabled) {
-        gen_helper_debug(cpu_env);
-    } else if (recheck_tf) {
+    if (recheck_tf) {
         gen_helper_rechecking_single_step(cpu_env);
         tcg_gen_exit_tb(NULL, 0);
     } else if (s->flags & HF_TF_MASK) {
-- 
2.25.1

Now that tcg can handle direct and indirect goto_tb
simultaneously, we can optimistically leave space for
a direct branch and fall back to loading the pointer
from the TB for an indirect branch.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 
 static void tcg_out_goto_tb(TCGContext *s, int which)
 {
-    /* indirect jump method */
+    /* Direct branch will be patched by tb_target_set_jmp_target. */
+    set_jmp_insn_offset(s, which);
+    tcg_out32(s, OPC_JAL);
+
+    /* When branch is out of range, fall through to indirect. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                get_jmp_target_addr(s, which));
     tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                               uintptr_t jmp_rx, uintptr_t jmp_rw)
 {
-    /* Always indirect, nothing to do */
+    uintptr_t addr = tb->jmp_target_addr[n];
+    ptrdiff_t offset = addr - jmp_rx;
+    tcg_insn_unit insn;
+
+    /* Either directly branch, or fall through to indirect branch. */
+    if (offset == sextreg(offset, 0, 20)) {
+        insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
+    } else {
+        insn = OPC_NOP;
+    }
+    qatomic_set((uint32_t *)jmp_rw, insn);
+    flush_idcache_range(jmp_rx, jmp_rw, 4);
 }
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
-- 
2.34.1
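For the riscv patch above, the direct-branch test hinges on whether the
displacement survives a sign-extension round trip.  A sketch of that check
follows; sextreg is written out as a local stand-in for QEMU's helper, and
the whole fragment is illustrative rather than the QEMU implementation:

/* Sketch: decide whether a riscv goto_tb site can use a direct JAL or
   must keep the nop and fall through to the indirect JALR path. */
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for QEMU's sextreg(): take len bits at pos, sign-extended.
   Relies on arithmetic right shift of signed values, as QEMU does. */
static int64_t sextreg(int64_t value, unsigned pos, unsigned len)
{
    return ((int64_t)((uint64_t)value << (64 - len - pos))) >> (64 - len);
}

static bool jal_reaches(uintptr_t target, uintptr_t site)
{
    int64_t offset = (int64_t)(target - site);
    /* Offsets representable in 20 signed bits pass; that is safely
       inside JAL's architectural reach of roughly +/-1MB. */
    return offset == sextreg(offset, 0, 20);
}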