The following changes since commit 6587b0c1331d427b0939c37e763842550ed581db:

  Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2021-10-15' into staging (2021-10-15 14:16:28 -0700)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20211016

for you to fetch changes up to 995b87dedc78b0467f5f18bbc3546072ba97516a:

  Revert "cpu: Move cpu_common_props to hw/core/cpu.c" (2021-10-15 16:39:15 -0700)

----------------------------------------------------------------
Move gdb singlestep to generic code
Fix cpu_common_props

----------------------------------------------------------------
Richard Henderson (24):
      accel/tcg: Handle gdb singlestep in cpu_tb_exec
      target/alpha: Drop checks for singlestep_enabled
      target/avr: Drop checks for singlestep_enabled
      target/cris: Drop checks for singlestep_enabled
      target/hexagon: Drop checks for singlestep_enabled
      target/arm: Drop checks for singlestep_enabled
      target/hppa: Drop checks for singlestep_enabled
      target/i386: Check CF_NO_GOTO_TB for dc->jmp_opt
      target/i386: Drop check for singlestep_enabled
      target/m68k: Drop checks for singlestep_enabled
      target/microblaze: Check CF_NO_GOTO_TB for DISAS_JUMP
      target/microblaze: Drop checks for singlestep_enabled
      target/mips: Fix single stepping
      target/mips: Drop exit checks for singlestep_enabled
      target/openrisc: Drop checks for singlestep_enabled
      target/ppc: Drop exit checks for singlestep_enabled
      target/riscv: Remove dead code after exception
      target/riscv: Remove exit_tb and lookup_and_goto_ptr
      target/rx: Drop checks for singlestep_enabled
      target/s390x: Drop check for singlestep_enabled
      target/sh4: Drop check for singlestep_enabled
      target/tricore: Drop check for singlestep_enabled
      target/xtensa: Drop check for singlestep_enabled
      Revert "cpu: Move cpu_common_props to hw/core/cpu.c"

 include/hw/core/cpu.h | 1 +
 target/i386/helper.h | 1 -
 target/rx/helper.h | 1 -
 target/sh4/helper.h | 1 -
 target/tricore/helper.h | 1 -
 accel/tcg/cpu-exec.c | 11 ++++
 cpu.c | 21 ++++++++
 hw/core/cpu-common.c | 17 +-----
 target/alpha/translate.c | 13 ++---
 target/arm/translate-a64.c | 10 +---
 target/arm/translate.c | 36 +++----------
 target/avr/translate.c | 19 ++-----
 target/cris/translate.c | 16 ------
 target/hexagon/translate.c | 12 +----
 target/hppa/translate.c | 17 ++----
 target/i386/tcg/misc_helper.c | 8 ---
 target/i386/tcg/translate.c | 9 ++--
 target/m68k/translate.c | 44 ++++-----------
 target/microblaze/translate.c | 18 ++-----
 target/mips/tcg/translate.c | 75 ++++++++++++--------------
 target/openrisc/translate.c | 18 ++-----
 target/ppc/translate.c | 38 +++----------
 target/riscv/translate.c | 27 +---------
 target/rx/op_helper.c | 8 ---
 target/rx/translate.c | 12 +----
 target/s390x/tcg/translate.c | 8 +--
 target/sh4/op_helper.c | 5 --
 target/sh4/translate.c | 14 ++---
 target/tricore/op_helper.c | 7 ---
 target/tricore/translate.c | 14 +----
 target/xtensa/translate.c | 25 +++------
 target/riscv/insn_trans/trans_privileged.c.inc | 10 ++--
 target/riscv/insn_trans/trans_rvi.c.inc | 8 ++-
 target/riscv/insn_trans/trans_rvv.c.inc | 2 +-
 34 files changed, 141 insertions(+), 386 deletions(-)

Second try's the charm today, right?

r~


The following changes since commit 00b1faea41d283e931256aa78aa975a369ec3ae6:

  Merge tag 'pull-target-arm-20230123' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2023-01-23 13:40:28 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230123

for you to fetch changes up to 709bcd7da3f6b4655d910634a0d520fa1439df38:

  tcg/loongarch64: Reorg goto_tb implementation (2023-01-23 16:00:13 -1000)

----------------------------------------------------------------
common-user: Re-enable ppc32 host
tcg: Avoid recursion in tcg_gen_mulu2_i32
tcg: Mark tcg helpers noinline to avoid an issue with LTO
tcg/arm: Use register pair allocation for qemu_{ld,st}_i64
disas: Enable loongarch disassembler, and fixes
tcg/loongarch64: Improve move immediate
tcg/loongarch64: Improve add immediate
tcg/loongarch64: Improve setcond
tcg/loongarch64: Implement movcond
tcg/loongarch64: Use tcg_pcrel_diff in tcg_out_ldst
tcg/loongarch64: Reorg goto_tb implementation

----------------------------------------------------------------
Richard Henderson (14):
      tcg: Avoid recursion in tcg_gen_mulu2_i32
      tcg/arm: Use register pair allocation for qemu_{ld,st}_i64
      common-user/host/ppc: Implement safe-syscall.inc.S
      linux-user: Implment host/ppc/host-signal.h
      tcg: Mark tcg helpers noinline to avoid an issue with LTO
      target/loongarch: Enable the disassembler for host tcg
      target/loongarch: Disassemble jirl properly
      target/loongarch: Disassemble pcadd* addresses
      tcg/loongarch64: Update tcg-insn-defs.c.inc
      tcg/loongarch64: Introduce tcg_out_addi
      tcg/loongarch64: Improve setcond expansion
      tcg/loongarch64: Implement movcond
      tcg/loongarch64: Use tcg_pcrel_diff in tcg_out_ldst
      tcg/loongarch64: Reorg goto_tb implementation

Rui Wang (1):
      tcg/loongarch64: Optimize immediate loading

 include/exec/helper-proto.h | 32 ++-
 include/tcg/tcg.h | 7 -
 linux-user/include/host/ppc/host-signal.h | 39 +++
 tcg/arm/tcg-target-con-set.h | 7 +-
 tcg/arm/tcg-target-con-str.h | 2 +
 tcg/loongarch64/tcg-target-con-set.h | 5 +-
 tcg/loongarch64/tcg-target-con-str.h | 2 +-
 tcg/loongarch64/tcg-target.h | 11 +-
 target/loongarch/insns.decode | 3 +-
 disas.c | 2 +
 target/loongarch/disas.c | 39 ++-
 tcg/tcg-op.c | 4 +-
 target/loongarch/insn_trans/trans_branch.c.inc | 2 +-
 tcg/arm/tcg-target.c.inc | 28 +-
 tcg/loongarch64/tcg-insn-defs.c.inc | 10 +-
 tcg/loongarch64/tcg-target.c.inc | 364 ++++++++++++++++---------
 common-user/host/ppc/safe-syscall.inc.S | 107 ++++++++
 target/loongarch/meson.build | 3 +-
 18 files changed, 497 insertions(+), 170 deletions(-)
 create mode 100644 linux-user/include/host/ppc/host-signal.h
 create mode 100644 common-user/host/ppc/safe-syscall.inc.S
Deleted patch
Currently the change in cpu_tb_exec is masked by the debug exception
being raised by the translators. But this allows us to remove that code.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
cc->set_pc(cpu, last_tb->pc);
}
}
+
+ /*
+ * If gdb single-step, and we haven't raised another exception,
+ * raise a debug exception. Single-step with another exception
+ * is handled in cpu_handle_exception.
+ */
+ if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
+ }
+
return last_tb;
}
--
2.25.1
Deleted patch
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/translate.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
/* FALLTHRU */
case DISAS_PC_UPDATED:
- if (!ctx->base.singlestep_enabled) {
- tcg_gen_lookup_and_goto_ptr();
- break;
- }
- /* FALLTHRU */
+ tcg_gen_lookup_and_goto_ptr();
+ break;
case DISAS_PC_UPDATED_NOCHAIN:
- if (ctx->base.singlestep_enabled) {
- gen_excp_1(EXCP_DEBUG, 0);
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
--
2.25.1
Deleted patch
GDB single-stepping is now handled generically.

Tested-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/translate.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
tcg_gen_exit_tb(tb, n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
- if (ctx->base.singlestep_enabled) {
- gen_helper_debug(cpu_env);
- } else {
- tcg_gen_lookup_and_goto_ptr();
- }
+ tcg_gen_lookup_and_goto_ptr();
}
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_movi_tl(cpu_pc, ctx->npc);
/* fall through */
case DISAS_LOOKUP:
- if (!ctx->base.singlestep_enabled) {
- tcg_gen_lookup_and_goto_ptr();
- break;
- }
- /* fall through */
+ tcg_gen_lookup_and_goto_ptr();
+ break;
case DISAS_EXIT:
- if (ctx->base.singlestep_enabled) {
- gen_helper_debug(cpu_env);
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ tcg_gen_exit_tb(NULL, 0);
break;
default:
g_assert_not_reached();
--
2.25.1
Deleted patch
GDB single-stepping is now handled generically.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/translate.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
}
}

- if (unlikely(dc->base.singlestep_enabled)) {
- switch (is_jmp) {
- case DISAS_TOO_MANY:
- case DISAS_UPDATE_NEXT:
- tcg_gen_movi_tl(env_pc, npc);
- /* fall through */
- case DISAS_JUMP:
- case DISAS_UPDATE:
- t_gen_raise_exception(EXCP_DEBUG);
- return;
- default:
- break;
- }
- g_assert_not_reached();
- }
-
switch (is_jmp) {
case DISAS_TOO_MANY:
gen_goto_tb(dc, 0, npc);
--
2.25.1
Deleted patch
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/translate.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_end_tb(DisasContext *ctx)
{
gen_exec_counters(ctx);
tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC);
- if (ctx->base.singlestep_enabled) {
- gen_exception_raw(EXCP_DEBUG);
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ tcg_gen_exit_tb(NULL, 0);
ctx->base.is_jmp = DISAS_NORETURN;
}

@@ -XXX,XX +XXX,XX @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
case DISAS_TOO_MANY:
gen_exec_counters(ctx);
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next);
- if (ctx->base.singlestep_enabled) {
- gen_exception_raw(EXCP_DEBUG);
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ tcg_gen_exit_tb(NULL, 0);
break;
case DISAS_NORETURN:
break;
--
2.25.1
GDB single-stepping is now handled generically.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/tricore/helper.h | 1 -
 target/tricore/op_helper.c | 7 -------
 target/tricore/translate.c | 14 +-------------
 3 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/target/tricore/helper.h b/target/tricore/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/helper.h
+++ b/target/tricore/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(psw_write, void, env, i32)
DEF_HELPER_1(psw_read, i32, env)
/* Exceptions */
DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32)
-DEF_HELPER_2(qemu_excp, noreturn, env, i32)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -XXX,XX +XXX,XX @@ static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class,
raise_exception_sync_internal(env, class, tin, pc, 0);
}

-void helper_qemu_excp(CPUTriCoreState *env, uint32_t excp)
-{
- CPUState *cs = env_cpu(env);
- cs->exception_index = excp;
- cpu_loop_exit(cs);
-}
-
/* Addressing mode helper */

static uint16_t reverse16(uint16_t val)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_pc(target_ulong pc)
tcg_gen_movi_tl(cpu_PC, pc);
}

-static void generate_qemu_excp(DisasContext *ctx, int excp)
-{
- TCGv_i32 tmp = tcg_const_i32(excp);
- gen_helper_qemu_excp(cpu_env, tmp);
- ctx->base.is_jmp = DISAS_NORETURN;
- tcg_temp_free(tmp);
-}
-
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
if (translator_use_goto_tb(&ctx->base, dest)) {
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
tcg_gen_exit_tb(ctx->base.tb, n);
} else {
gen_save_pc(dest);
- if (ctx->base.singlestep_enabled) {
- generate_qemu_excp(ctx, EXCP_DEBUG);
- } else {
- tcg_gen_lookup_and_goto_ptr();
- }
+ tcg_gen_lookup_and_goto_ptr();
}
}
--
2.25.1

We have a test for one of TCG_TARGET_HAS_mulu2_i32 or
TCG_TARGET_HAS_muluh_i32 being defined, but the test
became non-functional when we changed to always define
all of these macros.

Replace this with a build-time test in tcg_gen_mulu2_i32.

Fixes: 25c4d9cc845 ("tcg: Always define all of the TCGOpcode enum members.")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1435
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 7 -------
 tcg/tcg-op.c | 4 +++-
 2 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_rem_i64 0
#endif

-/* For 32-bit targets, some sort of unsigned widening multiply is required. */
-#if TCG_TARGET_REG_BITS == 32 \
- && !(defined(TCG_TARGET_HAS_mulu2_i32) \
- || defined(TCG_TARGET_HAS_muluh_i32))
-# error "Missing unsigned widening multiply"
-#endif
-
#if !defined(TCG_TARGET_HAS_v64) \
&& !defined(TCG_TARGET_HAS_v128) \
&& !defined(TCG_TARGET_HAS_v256)
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
tcg_gen_mov_i32(rl, t);
tcg_temp_free_i32(t);
- } else {
+ } else if (TCG_TARGET_REG_BITS == 64) {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t0, arg1);
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
tcg_gen_extr_i64_i32(rl, rh, t0);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
+ } else {
+ qemu_build_not_reached();
}
}
--
2.34.1
1
GDB single-stepping is now handled generically.
1
Although we still can't use ldrd and strd for all operations,
2
increase the chances by getting the register allocation correct.
2
3
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
target/xtensa/translate.c | 25 ++++++++-----------------
6
tcg/arm/tcg-target-con-set.h | 7 ++++---
6
1 file changed, 8 insertions(+), 17 deletions(-)
7
tcg/arm/tcg-target-con-str.h | 2 ++
8
tcg/arm/tcg-target.c.inc | 28 ++++++++++++++++++----------
9
3 files changed, 24 insertions(+), 13 deletions(-)
7
10
8
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
11
diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
9
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
10
--- a/target/xtensa/translate.c
13
--- a/tcg/arm/tcg-target-con-set.h
11
+++ b/target/xtensa/translate.c
14
+++ b/tcg/arm/tcg-target-con-set.h
12
@@ -XXX,XX +XXX,XX @@ static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
15
@@ -XXX,XX +XXX,XX @@ C_O0_I2(r, rIN)
13
if (dc->icount) {
16
C_O0_I2(s, s)
14
tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
17
C_O0_I2(w, r)
15
}
18
C_O0_I3(s, s, s)
16
- if (dc->base.singlestep_enabled) {
19
+C_O0_I3(S, p, s)
17
- gen_exception(dc, EXCP_DEBUG);
20
C_O0_I4(r, r, rI, rI)
18
+ if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
21
-C_O0_I4(s, s, s, s)
19
+ slot = gen_postprocess(dc, slot);
22
+C_O0_I4(S, p, s, s)
20
+ }
23
C_O1_I1(r, l)
21
+ if (slot >= 0) {
24
C_O1_I1(r, r)
22
+ tcg_gen_goto_tb(slot);
25
C_O1_I1(w, r)
23
+ tcg_gen_exit_tb(dc->base.tb, slot);
26
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, wZ)
24
} else {
27
C_O1_I3(w, w, w, w)
25
- if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
28
C_O1_I4(r, r, r, rI, rI)
26
- slot = gen_postprocess(dc, slot);
29
C_O1_I4(r, r, rIN, rIK, 0)
27
- }
30
-C_O2_I1(r, r, l)
28
- if (slot >= 0) {
31
-C_O2_I2(r, r, l, l)
29
- tcg_gen_goto_tb(slot);
32
+C_O2_I1(e, p, l)
30
- tcg_gen_exit_tb(dc->base.tb, slot);
33
+C_O2_I2(e, p, l, l)
31
- } else {
34
C_O2_I2(r, r, r, r)
32
- tcg_gen_exit_tb(NULL, 0);
35
C_O2_I4(r, r, r, r, rIN, rIK)
33
- }
36
C_O2_I4(r, r, rI, rI, rIN, rIK)
34
+ tcg_gen_exit_tb(NULL, 0);
37
diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h
35
}
38
index XXXXXXX..XXXXXXX 100644
36
dc->base.is_jmp = DISAS_NORETURN;
39
--- a/tcg/arm/tcg-target-con-str.h
37
}
40
+++ b/tcg/arm/tcg-target-con-str.h
38
@@ -XXX,XX +XXX,XX @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
41
@@ -XXX,XX +XXX,XX @@
39
case DISAS_NORETURN:
42
* Define constraint letters for register sets:
43
* REGS(letter, register_mask)
44
*/
45
+REGS('e', ALL_GENERAL_REGS & 0x5555) /* even regs */
46
REGS('r', ALL_GENERAL_REGS)
47
REGS('l', ALL_QLOAD_REGS)
48
REGS('s', ALL_QSTORE_REGS)
49
+REGS('S', ALL_QSTORE_REGS & 0x5555) /* even qstore */
50
REGS('w', ALL_VECTOR_REGS)
51
52
/*
53
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
54
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/arm/tcg-target.c.inc
56
+++ b/tcg/arm/tcg-target.c.inc
57
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
58
tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
40
break;
59
break;
41
case DISAS_TOO_MANY:
60
case MO_UQ:
42
- if (dc->base.singlestep_enabled) {
61
+ /* We used pair allocation for datalo, so already should be aligned. */
43
- tcg_gen_movi_i32(cpu_pc, dc->pc);
62
+ tcg_debug_assert((datalo & 1) == 0);
44
- gen_exception(dc, EXCP_DEBUG);
63
+ tcg_debug_assert(datahi == datalo + 1);
45
- } else {
64
/* LDRD requires alignment; double-check that. */
46
- gen_jumpi(dc, dc->pc, 0);
65
- if (get_alignment_bits(opc) >= MO_64
47
- }
66
- && (datalo & 1) == 0 && datahi == datalo + 1) {
48
+ gen_jumpi(dc, dc->pc, 0);
67
+ if (get_alignment_bits(opc) >= MO_64) {
68
/*
69
* Rm (the second address op) must not overlap Rt or Rt + 1.
70
* Since datalo is aligned, we can simplify the test via alignment.
71
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
72
tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
49
break;
73
break;
50
default:
74
case MO_UQ:
51
g_assert_not_reached();
75
+ /* We used pair allocation for datalo, so already should be aligned. */
76
+ tcg_debug_assert((datalo & 1) == 0);
77
+ tcg_debug_assert(datahi == datalo + 1);
78
/* LDRD requires alignment; double-check that. */
79
- if (get_alignment_bits(opc) >= MO_64
80
- && (datalo & 1) == 0 && datahi == datalo + 1) {
81
+ if (get_alignment_bits(opc) >= MO_64) {
82
tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
83
} else if (datalo == addrlo) {
84
tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
85
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
86
tcg_out_st32_r(s, cond, datalo, addrlo, addend);
87
break;
88
case MO_64:
89
+ /* We used pair allocation for datalo, so already should be aligned. */
90
+ tcg_debug_assert((datalo & 1) == 0);
91
+ tcg_debug_assert(datahi == datalo + 1);
92
/* STRD requires alignment; double-check that. */
93
- if (get_alignment_bits(opc) >= MO_64
94
- && (datalo & 1) == 0 && datahi == datalo + 1) {
95
+ if (get_alignment_bits(opc) >= MO_64) {
96
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
97
} else if (scratch_addend) {
98
tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
99
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
100
tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
101
break;
102
case MO_64:
103
+ /* We used pair allocation for datalo, so already should be aligned. */
104
+ tcg_debug_assert((datalo & 1) == 0);
105
+ tcg_debug_assert(datahi == datalo + 1);
106
/* STRD requires alignment; double-check that. */
107
- if (get_alignment_bits(opc) >= MO_64
108
- && (datalo & 1) == 0 && datahi == datalo + 1) {
109
+ if (get_alignment_bits(opc) >= MO_64) {
110
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
111
} else {
112
tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
113
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
114
case INDEX_op_qemu_ld_i32:
115
return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
116
case INDEX_op_qemu_ld_i64:
117
- return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
118
+ return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l);
119
case INDEX_op_qemu_st_i32:
120
return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
121
case INDEX_op_qemu_st_i64:
122
- return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);
123
+ return TARGET_LONG_BITS == 32 ? C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s);
124
125
case INDEX_op_st_vec:
126
return C_O0_I2(w, r);
52
--
127
--
53
2.25.1
128
2.34.1
54
55
1
GDB single-stepping is now handled generically.
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
3
Message-Id: <20220729172141.1789105-2-richard.henderson@linaro.org>
4
---
5
common-user/host/ppc/safe-syscall.inc.S | 107 ++++++++++++++++++++++++
6
1 file changed, 107 insertions(+)
7
create mode 100644 common-user/host/ppc/safe-syscall.inc.S
2
8
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
diff --git a/common-user/host/ppc/safe-syscall.inc.S b/common-user/host/ppc/safe-syscall.inc.S
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
new file mode 100644
5
---
11
index XXXXXXX..XXXXXXX
6
target/sh4/helper.h | 1 -
12
--- /dev/null
7
target/sh4/op_helper.c | 5 -----
13
+++ b/common-user/host/ppc/safe-syscall.inc.S
8
target/sh4/translate.c | 14 +++-----------
14
@@ -XXX,XX +XXX,XX @@
9
3 files changed, 3 insertions(+), 17 deletions(-)
15
+/*
10
16
+ * safe-syscall.inc.S : host-specific assembly fragment
11
diff --git a/target/sh4/helper.h b/target/sh4/helper.h
17
+ * to handle signals occurring at the same time as system calls.
12
index XXXXXXX..XXXXXXX 100644
18
+ * This is intended to be included by common-user/safe-syscall.S
13
--- a/target/sh4/helper.h
19
+ *
14
+++ b/target/sh4/helper.h
20
+ * Copyright (C) 2022 Linaro, Ltd.
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
21
+ *
16
DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env)
22
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
17
DEF_HELPER_1(raise_fpu_disable, noreturn, env)
23
+ * See the COPYING file in the top-level directory.
18
DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env)
24
+ */
19
-DEF_HELPER_1(debug, noreturn, env)
25
+
20
DEF_HELPER_1(sleep, noreturn, env)
26
+/*
21
DEF_HELPER_2(trapa, noreturn, env, i32)
27
+ * Standardize on the _CALL_FOO symbols used by GCC:
22
DEF_HELPER_1(exclusive, noreturn, env)
28
+ * Apple XCode does not define _CALL_DARWIN.
23
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
29
+ * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
24
index XXXXXXX..XXXXXXX 100644
30
+ */
25
--- a/target/sh4/op_helper.c
31
+#if !defined(_CALL_SYSV) && \
26
+++ b/target/sh4/op_helper.c
32
+ !defined(_CALL_DARWIN) && \
27
@@ -XXX,XX +XXX,XX @@ void helper_raise_slot_fpu_disable(CPUSH4State *env)
33
+ !defined(_CALL_AIX) && \
28
raise_exception(env, 0x820, 0);
34
+ !defined(_CALL_ELF)
29
}
35
+# if defined(__APPLE__)
30
36
+# define _CALL_DARWIN
31
-void helper_debug(CPUSH4State *env)
37
+# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
32
-{
38
+# define _CALL_SYSV
33
- raise_exception(env, EXCP_DEBUG, 0);
39
+# else
34
-}
40
+# error "Unknown ABI"
35
-
41
+# endif
36
void helper_sleep(CPUSH4State *env)
42
+#endif
37
{
43
+
38
CPUState *cs = env_cpu(env);
44
+#ifndef _CALL_SYSV
39
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
45
+# error "Unsupported ABI"
40
index XXXXXXX..XXXXXXX 100644
46
+#endif
41
--- a/target/sh4/translate.c
47
+
42
+++ b/target/sh4/translate.c
48
+
43
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
49
+ .global safe_syscall_base
44
tcg_gen_exit_tb(ctx->base.tb, n);
50
+ .global safe_syscall_start
45
} else {
51
+ .global safe_syscall_end
46
tcg_gen_movi_i32(cpu_pc, dest);
52
+ .type safe_syscall_base, @function
47
- if (ctx->base.singlestep_enabled) {
53
+
48
- gen_helper_debug(cpu_env);
54
+ .text
49
- } else if (use_exit_tb(ctx)) {
55
+
50
+ if (use_exit_tb(ctx)) {
56
+ /*
51
tcg_gen_exit_tb(NULL, 0);
57
+ * This is the entry point for making a system call. The calling
52
} else {
58
+ * convention here is that of a C varargs function with the
53
tcg_gen_lookup_and_goto_ptr();
59
+ * first argument an 'int *' to the signal_pending flag, the
54
@@ -XXX,XX +XXX,XX @@ static void gen_jump(DisasContext * ctx)
60
+ * second one the system call number (as a 'long'), and all further
55
     delayed jump as immediate jump are conditinal jumps */
61
+ * arguments being syscall arguments (also 'long').
56
    tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
62
+ */
57
tcg_gen_discard_i32(cpu_delayed_pc);
63
+safe_syscall_base:
58
- if (ctx->base.singlestep_enabled) {
64
+ .cfi_startproc
59
- gen_helper_debug(cpu_env);
65
+ stwu 1, -8(1)
60
- } else if (use_exit_tb(ctx)) {
66
+ .cfi_def_cfa_offset 8
61
+ if (use_exit_tb(ctx)) {
67
+ stw 30, 4(1)
62
tcg_gen_exit_tb(NULL, 0);
68
+ .cfi_offset 30, -4
63
} else {
69
+
64
tcg_gen_lookup_and_goto_ptr();
70
+ /*
65
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
71
+ * We enter with r3 == &signal_pending
66
switch (ctx->base.is_jmp) {
72
+ * r4 == syscall number
67
case DISAS_STOP:
73
+ * r5 ... r10 == syscall arguments
68
gen_save_cpu_state(ctx, true);
74
+ * and return the result in r3
69
- if (ctx->base.singlestep_enabled) {
75
+ * and the syscall instruction needs
70
- gen_helper_debug(cpu_env);
76
+ * r0 == syscall number
71
- } else {
77
+ * r3 ... r8 == syscall arguments
72
- tcg_gen_exit_tb(NULL, 0);
78
+ * and returns the result in r3
73
- }
79
+ * Shuffle everything around appropriately.
74
+ tcg_gen_exit_tb(NULL, 0);
80
+ */
75
break;
81
+ mr 30, 3 /* signal_pending */
76
case DISAS_NEXT:
82
+ mr 0, 4 /* syscall number */
77
case DISAS_TOO_MANY:
83
+ mr 3, 5 /* syscall arguments */
84
+ mr 4, 6
85
+ mr 5, 7
86
+ mr 6, 8
87
+ mr 7, 9
88
+ mr 8, 10
89
+
90
+ /*
91
+ * This next sequence of code works in conjunction with the
92
+ * rewind_if_safe_syscall_function(). If a signal is taken
93
+ * and the interrupted PC is anywhere between 'safe_syscall_start'
94
+ * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
95
+ * The code sequence must therefore be able to cope with this, and
96
+ * the syscall instruction must be the final one in the sequence.
97
+ */
98
+safe_syscall_start:
99
+ /* if signal_pending is non-zero, don't do the call */
100
+ lwz 12, 0(30)
101
+ cmpwi 0, 12, 0
102
+ bne- 2f
103
+ sc
104
+safe_syscall_end:
105
+ /* code path when we did execute the syscall */
106
+ lwz 30, 4(1) /* restore r30 */
107
+ addi 1, 1, 8 /* restore stack */
108
+ .cfi_restore 30
109
+ .cfi_def_cfa_offset 0
110
+ bnslr+ /* return on success */
111
+ b safe_syscall_set_errno_tail
112
+
113
+ /* code path when we didn't execute the syscall */
114
+2: lwz 30, 4(1)
115
+ addi 1, 1, 8
116
+ addi 3, 0, QEMU_ERESTARTSYS
117
+ b safe_syscall_set_errno_tail
118
+
119
+ .cfi_endproc
120
+
121
+ .size safe_syscall_base, .-safe_syscall_base
78
--
122
--
79
2.25.1
123
2.34.1
80
81
1
GDB single-stepping is now handled generically.
1
This commit re-enables ppc32 as a linux-user host,
2
as existence of the directory is noted by configure.
2
3
4
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1097
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
7
Message-Id: <20220729172141.1789105-3-richard.henderson@linaro.org>
4
---
8
---
5
target/s390x/tcg/translate.c | 8 ++------
9
linux-user/include/host/ppc/host-signal.h | 39 +++++++++++++++++++++++
6
1 file changed, 2 insertions(+), 6 deletions(-)
10
1 file changed, 39 insertions(+)
11
create mode 100644 linux-user/include/host/ppc/host-signal.h
7
12
8
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
13
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
9
index XXXXXXX..XXXXXXX 100644
14
new file mode 100644
10
--- a/target/s390x/tcg/translate.c
15
index XXXXXXX..XXXXXXX
11
+++ b/target/s390x/tcg/translate.c
16
--- /dev/null
12
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
17
+++ b/linux-user/include/host/ppc/host-signal.h
13
uint64_t pc_tmp;
18
@@ -XXX,XX +XXX,XX @@
14
uint32_t ilen;
19
+/*
15
enum cc_op cc_op;
20
+ * host-signal.h: signal info dependent on the host architecture
16
- bool do_debug;
21
+ *
17
};
22
+ * Copyright (c) 2022 Linaro Ltd.
18
23
+ *
19
/* Information carried about a condition to be evaluated. */
24
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
20
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
25
+ * See the COPYING file in the top-level directory.
21
26
+ */
22
dc->cc_op = CC_OP_DYNAMIC;
27
+
23
dc->ex_value = dc->base.tb->cs_base;
28
+#ifndef PPC_HOST_SIGNAL_H
24
- dc->do_debug = dc->base.singlestep_enabled;
29
+#define PPC_HOST_SIGNAL_H
25
}
30
+
26
31
+#include <asm/ptrace.h>
27
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
32
+
28
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
33
+/* The third argument to a SA_SIGINFO handler is ucontext_t. */
29
/* FALLTHRU */
34
+typedef ucontext_t host_sigcontext;
30
case DISAS_PC_CC_UPDATED:
35
+
31
/* Exit the TB, either by raising a debug exception or by return. */
36
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
32
- if (dc->do_debug) {
37
+{
33
- gen_exception(EXCP_DEBUG);
38
+ return uc->uc_mcontext.regs->nip;
34
- } else if ((dc->base.tb->flags & FLAG_MASK_PER) ||
39
+}
35
- dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
40
+
36
+ if ((dc->base.tb->flags & FLAG_MASK_PER) ||
41
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
37
+ dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
42
+{
38
tcg_gen_exit_tb(NULL, 0);
43
+ uc->uc_mcontext.regs->nip = pc;
39
} else {
44
+}
40
tcg_gen_lookup_and_goto_ptr();
45
+
46
+static inline void *host_signal_mask(host_sigcontext *uc)
47
+{
48
+ return &uc->uc_sigmask;
49
+}
50
+
51
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
52
+{
53
+ return uc->uc_mcontext.regs->trap != 0x400
54
+ && (uc->uc_mcontext.regs->dsisr & 0x02000000);
55
+}
56
+
57
+#endif
41
--
58
--
42
2.25.1
59
2.34.1
43
44
1
GDB single-stepping is now handled generically.
1
Marking helpers __attribute__((noinline)) prevents an issue
2
Reuse gen_debug_exception to handle architectural debug exceptions.
2
with GCC's ipa-split pass under --enable-lto.
3
3
4
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1454
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Tested-by: Idan Horowitz <idan.horowitz@gmail.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
8
---
6
target/ppc/translate.c | 38 ++++++++------------------------------
9
include/exec/helper-proto.h | 32 ++++++++++++++++++++++++--------
7
1 file changed, 8 insertions(+), 30 deletions(-)
10
1 file changed, 24 insertions(+), 8 deletions(-)
8
11
9
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
12
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
10
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
11
--- a/target/ppc/translate.c
14
--- a/include/exec/helper-proto.h
12
+++ b/target/ppc/translate.c
15
+++ b/include/exec/helper-proto.h
13
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@
14
17
15
#define CPU_SINGLE_STEP 0x1
18
#include "exec/helper-head.h"
16
#define CPU_BRANCH_STEP 0x2
19
17
-#define GDBSTUB_SINGLE_STEP 0x4
20
+/*
18
21
+ * Work around an issue with --enable-lto, in which GCC's ipa-split pass
19
/* Include definitions for instructions classes and implementations flags */
22
+ * decides to split out the noreturn code paths that raise an exception,
20
/* #define PPC_DEBUG_DISAS */
23
+ * taking the __builtin_return_address() along into the new function,
21
@@ -XXX,XX +XXX,XX @@ static uint32_t gen_prep_dbgex(DisasContext *ctx)
24
+ * where it no longer computes a value that returns to TCG generated code.
22
25
+ * Despite the name, the noinline attribute affects splitter, so this
23
static void gen_debug_exception(DisasContext *ctx)
26
+ * prevents the optimization in question. Given that helpers should not
24
{
27
+ * otherwise be called directly, this should have any other visible effect.
25
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
28
+ *
26
+ gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
29
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1454
27
ctx->base.is_jmp = DISAS_NORETURN;
30
+ */
28
}
31
+#define DEF_HELPER_ATTR __attribute__((noinline))
29
32
+
30
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
33
#define DEF_HELPER_FLAGS_0(name, flags, ret) \
31
34
-dh_ctype(ret) HELPER(name) (void);
32
static void gen_lookup_and_goto_ptr(DisasContext *ctx)
35
+dh_ctype(ret) HELPER(name) (void) DEF_HELPER_ATTR;
33
{
36
34
- int sse = ctx->singlestep_enabled;
37
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
35
- if (unlikely(sse)) {
38
-dh_ctype(ret) HELPER(name) (dh_ctype(t1));
36
- if (sse & GDBSTUB_SINGLE_STEP) {
39
+dh_ctype(ret) HELPER(name) (dh_ctype(t1)) DEF_HELPER_ATTR;
37
- gen_debug_exception(ctx);
40
38
- } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) {
41
#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
39
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
42
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2));
40
- } else {
43
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)) DEF_HELPER_ATTR;
41
- tcg_gen_exit_tb(NULL, 0);
44
42
- }
45
#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
43
+ if (unlikely(ctx->singlestep_enabled)) {
46
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3));
44
+ gen_debug_exception(ctx);
47
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), \
45
} else {
48
+ dh_ctype(t3)) DEF_HELPER_ATTR;
46
tcg_gen_lookup_and_goto_ptr();
49
47
}
50
#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
48
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
51
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
49
ctx->singlestep_enabled = 0;
52
- dh_ctype(t4));
50
if ((hflags >> HFLAGS_SE) & 1) {
53
+ dh_ctype(t4)) DEF_HELPER_ATTR;
51
ctx->singlestep_enabled |= CPU_SINGLE_STEP;
54
52
+ ctx->base.max_insns = 1;
55
#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
53
}
56
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
54
if ((hflags >> HFLAGS_BE) & 1) {
57
- dh_ctype(t4), dh_ctype(t5));
55
ctx->singlestep_enabled |= CPU_BRANCH_STEP;
58
+ dh_ctype(t4), dh_ctype(t5)) DEF_HELPER_ATTR;
56
}
59
57
- if (unlikely(ctx->base.singlestep_enabled)) {
60
#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
58
- ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
61
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
59
- }
62
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
60
-
63
+ dh_ctype(t4), dh_ctype(t5), \
61
- if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) {
64
+ dh_ctype(t6)) DEF_HELPER_ATTR;
62
- ctx->base.max_insns = 1;
65
63
- }
66
#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
64
}
67
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
65
68
dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
66
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
69
- dh_ctype(t7));
67
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
70
+ dh_ctype(t7)) DEF_HELPER_ATTR;
68
DisasContext *ctx = container_of(dcbase, DisasContext, base);
71
69
DisasJumpType is_jmp = ctx->base.is_jmp;
72
#define IN_HELPER_PROTO
70
target_ulong nip = ctx->base.pc_next;
73
71
- int sse;
74
@@ -XXX,XX +XXX,XX @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
72
75
#undef DEF_HELPER_FLAGS_5
73
if (is_jmp == DISAS_NORETURN) {
76
#undef DEF_HELPER_FLAGS_6
74
/* We have already exited the TB. */
77
#undef DEF_HELPER_FLAGS_7
75
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
78
+#undef DEF_HELPER_ATTR
76
}
79
77
80
#endif /* HELPER_PROTO_H */
78
/* Honor single stepping. */
79
- sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP);
80
- if (unlikely(sse)) {
81
+ if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
82
+ && (nip <= 0x100 || nip > 0xf00)) {
83
switch (is_jmp) {
84
case DISAS_TOO_MANY:
85
case DISAS_EXIT_UPDATE:
86
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
87
g_assert_not_reached();
88
}
89
90
- if (sse & GDBSTUB_SINGLE_STEP) {
91
- gen_debug_exception(ctx);
92
- return;
93
- }
94
- /* else CPU_SINGLE_STEP... */
95
- if (nip <= 0x100 || nip > 0xf00) {
96
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
97
- return;
98
- }
99
+ gen_debug_exception(ctx);
100
+ return;
101
}
102
103
switch (is_jmp) {
104
--
81
--
105
2.25.1
82
2.34.1
106
83
107
84
1
GDB single-stepping is now handled generically.
1
Reuse the decodetree based disassembler from
2
target/loongarch/ for tcg/loongarch64/.
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
The generation of decode-insns.c.inc into ./libcommon.fa.p/ could
5
eventually result in conflict, if any other host requires the same
6
trick, but this is good enough for now.
7
8
Reviewed-by: WANG Xuerui <git@xen0n.name>
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
11
---
6
target/mips/tcg/translate.c | 50 +++++++++++++------------------------
12
disas.c | 2 ++
7
1 file changed, 18 insertions(+), 32 deletions(-)
13
target/loongarch/meson.build | 3 ++-
14
2 files changed, 4 insertions(+), 1 deletion(-)
8
15
9
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
16
diff --git a/disas.c b/disas.c
10
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
11
--- a/target/mips/tcg/translate.c
18
--- a/disas.c
12
+++ b/target/mips/tcg/translate.c
19
+++ b/disas.c
13
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
20
@@ -XXX,XX +XXX,XX @@ static void initialize_debug_host(CPUDebug *s)
14
tcg_gen_exit_tb(ctx->base.tb, n);
21
s->info.cap_insn_split = 6;
15
} else {
22
#elif defined(__hppa__)
16
gen_save_pc(dest);
23
s->info.print_insn = print_insn_hppa;
17
- if (ctx->base.singlestep_enabled) {
24
+#elif defined(__loongarch__)
18
- save_cpu_state(ctx, 0);
25
+ s->info.print_insn = print_insn_loongarch;
19
- gen_helper_raise_exception_debug(cpu_env);
26
#endif
20
- } else {
21
- tcg_gen_lookup_and_goto_ptr();
22
- }
23
+ tcg_gen_lookup_and_goto_ptr();
24
}
25
}
27
}
26
28
27
@@ -XXX,XX +XXX,XX @@ static void gen_branch(DisasContext *ctx, int insn_bytes)
29
diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build
28
} else {
30
index XXXXXXX..XXXXXXX 100644
29
tcg_gen_mov_tl(cpu_PC, btarget);
31
--- a/target/loongarch/meson.build
30
}
32
+++ b/target/loongarch/meson.build
31
- if (ctx->base.singlestep_enabled) {
33
@@ -XXX,XX +XXX,XX @@ gen = decodetree.process('insns.decode')
32
- save_cpu_state(ctx, 0);
34
loongarch_ss = ss.source_set()
33
- gen_helper_raise_exception_debug(cpu_env);
35
loongarch_ss.add(files(
34
- }
36
'cpu.c',
35
tcg_gen_lookup_and_goto_ptr();
37
- 'disas.c',
36
break;
38
))
37
default:
39
loongarch_tcg_ss = ss.source_set()
38
@@ -XXX,XX +XXX,XX @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
40
loongarch_tcg_ss.add(gen)
39
{
41
@@ -XXX,XX +XXX,XX @@ loongarch_softmmu_ss.add(files(
40
DisasContext *ctx = container_of(dcbase, DisasContext, base);
42
'iocsr_helper.c',
41
43
))
42
- if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
44
43
- save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
45
+common_ss.add(when: 'CONFIG_LOONGARCH_DIS', if_true: [files('disas.c'), gen])
44
- gen_helper_raise_exception_debug(cpu_env);
46
+
45
- } else {
47
loongarch_ss.add_all(when: 'CONFIG_TCG', if_true: [loongarch_tcg_ss])
46
- switch (ctx->base.is_jmp) {
48
47
- case DISAS_STOP:
49
target_arch += {'loongarch': loongarch_ss}
48
- gen_save_pc(ctx->base.pc_next);
49
- tcg_gen_lookup_and_goto_ptr();
50
- break;
51
- case DISAS_NEXT:
52
- case DISAS_TOO_MANY:
53
- save_cpu_state(ctx, 0);
54
- gen_goto_tb(ctx, 0, ctx->base.pc_next);
55
- break;
56
- case DISAS_EXIT:
57
- tcg_gen_exit_tb(NULL, 0);
58
- break;
59
- case DISAS_NORETURN:
60
- break;
61
- default:
62
- g_assert_not_reached();
63
- }
64
+ switch (ctx->base.is_jmp) {
65
+ case DISAS_STOP:
66
+ gen_save_pc(ctx->base.pc_next);
67
+ tcg_gen_lookup_and_goto_ptr();
68
+ break;
69
+ case DISAS_NEXT:
70
+ case DISAS_TOO_MANY:
71
+ save_cpu_state(ctx, 0);
72
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
73
+ break;
74
+ case DISAS_EXIT:
75
+ tcg_gen_exit_tb(NULL, 0);
76
+ break;
77
+ case DISAS_NORETURN:
78
+ break;
79
+ default:
80
+ g_assert_not_reached();
81
}
82
}
83
84
--
50
--
85
2.25.1
51
2.34.1
86
52
87
53
1
GDB single-stepping is now handled generically.
1
While jirl shares the same instruction format as bne etc,
2
it is not assembled the same. In particular, rd is printed
3
first not second and the immediate is not pc-relative.
2
4
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Decode into the arg_rr_i structure, which prints correctly.
6
This changes the "offs" member to "imm", to update translate.
7
8
Reviewed-by: WANG Xuerui <git@xen0n.name>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
10
---
6
target/rx/helper.h | 1 -
11
target/loongarch/insns.decode | 3 ++-
7
target/rx/op_helper.c | 8 --------
12
target/loongarch/disas.c | 2 +-
8
target/rx/translate.c | 12 ++----------
13
target/loongarch/insn_trans/trans_branch.c.inc | 2 +-
9
3 files changed, 2 insertions(+), 19 deletions(-)
14
3 files changed, 4 insertions(+), 3 deletions(-)
10
15
11
diff --git a/target/rx/helper.h b/target/rx/helper.h
16
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
12
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
13
--- a/target/rx/helper.h
18
--- a/target/loongarch/insns.decode
14
+++ b/target/rx/helper.h
19
+++ b/target/loongarch/insns.decode
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
20
@@ -XXX,XX +XXX,XX @@
16
DEF_HELPER_1(raise_access_fault, noreturn, env)
21
@rr_ui12 .... ...... imm:12 rj:5 rd:5 &rr_i
17
DEF_HELPER_1(raise_privilege_violation, noreturn, env)
22
@rr_i14s2 .... .... .............. rj:5 rd:5 &rr_i imm=%i14s2
18
DEF_HELPER_1(wait, noreturn, env)
23
@rr_i16 .... .. imm:s16 rj:5 rd:5 &rr_i
19
-DEF_HELPER_1(debug, noreturn, env)
24
+@rr_i16s2 .... .. ................ rj:5 rd:5 &rr_i imm=%offs16
20
DEF_HELPER_2(rxint, noreturn, env, i32)
25
@hint_r_i12 .... ...... imm:s12 rj:5 hint:5 &hint_r_i
21
DEF_HELPER_1(rxbrk, noreturn, env)
26
@rrr_sa2p1 .... ........ ... .. rk:5 rj:5 rd:5 &rrr_sa sa=%sa2p1
22
DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32)
27
@rrr_sa2 .... ........ ... sa:2 rk:5 rj:5 rd:5 &rrr_sa
23
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
28
@@ -XXX,XX +XXX,XX @@ beqz 0100 00 ................ ..... ..... @r_offs21
29
bnez 0100 01 ................ ..... ..... @r_offs21
30
bceqz 0100 10 ................ 00 ... ..... @c_offs21
31
bcnez 0100 10 ................ 01 ... ..... @c_offs21
32
-jirl 0100 11 ................ ..... ..... @rr_offs16
33
+jirl 0100 11 ................ ..... ..... @rr_i16s2
34
b 0101 00 .......................... @offs26
35
bl 0101 01 .......................... @offs26
36
beq 0101 10 ................ ..... ..... @rr_offs16
37
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
24
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
25
--- a/target/rx/op_helper.c
39
--- a/target/loongarch/disas.c
26
+++ b/target/rx/op_helper.c
40
+++ b/target/loongarch/disas.c
27
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_wait(CPURXState *env)
41
@@ -XXX,XX +XXX,XX @@ INSN(beqz, r_offs)
28
raise_exception(env, EXCP_HLT, 0);
42
INSN(bnez, r_offs)
29
}
43
INSN(bceqz, c_offs)
30
44
INSN(bcnez, c_offs)
31
-void QEMU_NORETURN helper_debug(CPURXState *env)
45
-INSN(jirl, rr_offs)
32
-{
46
+INSN(jirl, rr_i)
33
- CPUState *cs = env_cpu(env);
47
INSN(b, offs)
34
-
48
INSN(bl, offs)
35
- cs->exception_index = EXCP_DEBUG;
49
INSN(beq, rr_offs)
36
- cpu_loop_exit(cs);
50
diff --git a/target/loongarch/insn_trans/trans_branch.c.inc b/target/loongarch/insn_trans/trans_branch.c.inc
37
-}
38
-
39
void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec)
40
{
41
raise_exception(env, 0x100 + vec, 0);
42
diff --git a/target/rx/translate.c b/target/rx/translate.c
43
index XXXXXXX..XXXXXXX 100644
51
index XXXXXXX..XXXXXXX 100644
44
--- a/target/rx/translate.c
52
--- a/target/loongarch/insn_trans/trans_branch.c.inc
45
+++ b/target/rx/translate.c
53
+++ b/target/loongarch/insn_trans/trans_branch.c.inc
46
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
54
@@ -XXX,XX +XXX,XX @@ static bool trans_jirl(DisasContext *ctx, arg_jirl *a)
47
tcg_gen_exit_tb(dc->base.tb, n);
55
TCGv dest = gpr_dst(ctx, a->rd, EXT_NONE);
48
} else {
56
TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
49
tcg_gen_movi_i32(cpu_pc, dest);
57
50
- if (dc->base.singlestep_enabled) {
58
- tcg_gen_addi_tl(cpu_pc, src1, a->offs);
51
- gen_helper_debug(cpu_env);
59
+ tcg_gen_addi_tl(cpu_pc, src1, a->imm);
52
- } else {
60
tcg_gen_movi_tl(dest, ctx->base.pc_next + 4);
53
- tcg_gen_lookup_and_goto_ptr();
61
gen_set_gpr(a->rd, dest, EXT_NONE);
54
- }
62
tcg_gen_lookup_and_goto_ptr();
55
+ tcg_gen_lookup_and_goto_ptr();
56
}
57
dc->base.is_jmp = DISAS_NORETURN;
58
}
59
@@ -XXX,XX +XXX,XX @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
60
gen_goto_tb(ctx, 0, dcbase->pc_next);
61
break;
62
case DISAS_JUMP:
63
- if (ctx->base.singlestep_enabled) {
64
- gen_helper_debug(cpu_env);
65
- } else {
66
- tcg_gen_lookup_and_goto_ptr();
67
- }
68
+ tcg_gen_lookup_and_goto_ptr();
69
break;
70
case DISAS_UPDATE:
71
tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
72
--
63
--
73
2.25.1
64
2.34.1
74
75
1
GDB single-stepping is now handled generically.
1
Print both the raw field and the resolved pc-relative
2
address, as we do for branches.
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Reviewed-by: WANG Xuerui <git@xen0n.name>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
target/openrisc/translate.c | 18 +++---------------
7
target/loongarch/disas.c | 37 +++++++++++++++++++++++++++++++++----
7
1 file changed, 3 insertions(+), 15 deletions(-)
8
1 file changed, 33 insertions(+), 4 deletions(-)
8
9
9
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
10
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/target/openrisc/translate.c
12
--- a/target/loongarch/disas.c
12
+++ b/target/openrisc/translate.c
13
+++ b/target/loongarch/disas.c
13
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
14
@@ -XXX,XX +XXX,XX @@ INSN(fsel, fffc)
14
/* The jump destination is indirect/computed; use jmp_pc. */
15
INSN(addu16i_d, rr_i)
15
tcg_gen_mov_tl(cpu_pc, jmp_pc);
16
INSN(lu12i_w, r_i)
16
tcg_gen_discard_tl(jmp_pc);
17
INSN(lu32i_d, r_i)
17
- if (unlikely(dc->base.singlestep_enabled)) {
18
-INSN(pcaddi, r_i)
18
- gen_exception(dc, EXCP_DEBUG);
19
-INSN(pcalau12i, r_i)
19
- } else {
20
-INSN(pcaddu12i, r_i)
20
- tcg_gen_lookup_and_goto_ptr();
21
-INSN(pcaddu18i, r_i)
21
- }
22
INSN(ll_w, rr_i)
22
+ tcg_gen_lookup_and_goto_ptr();
23
INSN(sc_w, rr_i)
23
break;
24
INSN(ll_d, rr_i)
24
}
25
@@ -XXX,XX +XXX,XX @@ static bool trans_fcmp_cond_##suffix(DisasContext *ctx, \
25
/* The jump destination is direct; use jmp_pc_imm.
26
26
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
27
FCMP_INSN(s)
27
break;
28
FCMP_INSN(d)
28
}
29
+
29
tcg_gen_movi_tl(cpu_pc, jmp_dest);
30
+#define PCADD_INSN(name) \
30
- if (unlikely(dc->base.singlestep_enabled)) {
31
+static bool trans_##name(DisasContext *ctx, arg_##name *a) \
31
- gen_exception(dc, EXCP_DEBUG);
32
+{ \
32
- } else {
33
+ output(ctx, #name, "r%d, %d # 0x%" PRIx64, \
33
- tcg_gen_lookup_and_goto_ptr();
34
+ a->rd, a->imm, gen_##name(ctx->pc, a->imm)); \
34
- }
35
+ return true; \
35
+ tcg_gen_lookup_and_goto_ptr();
36
+}
36
break;
37
+
37
38
+static uint64_t gen_pcaddi(uint64_t pc, int imm)
38
case DISAS_EXIT:
39
+{
39
- if (unlikely(dc->base.singlestep_enabled)) {
40
+ return pc + (imm << 2);
40
- gen_exception(dc, EXCP_DEBUG);
41
+}
41
- } else {
42
+
42
- tcg_gen_exit_tb(NULL, 0);
43
+static uint64_t gen_pcalau12i(uint64_t pc, int imm)
43
- }
44
+{
44
+ tcg_gen_exit_tb(NULL, 0);
45
+ return (pc + (imm << 12)) & ~0xfff;
45
break;
46
+}
46
default:
47
+
47
g_assert_not_reached();
48
+static uint64_t gen_pcaddu12i(uint64_t pc, int imm)
49
+{
50
+ return pc + (imm << 12);
51
+}
52
+
53
+static uint64_t gen_pcaddu18i(uint64_t pc, int imm)
54
+{
55
+ return pc + ((uint64_t)(imm) << 18);
56
+}
57
+
58
+PCADD_INSN(pcaddi)
59
+PCADD_INSN(pcalau12i)
60
+PCADD_INSN(pcaddu12i)
61
+PCADD_INSN(pcaddu18i)
48
--
62
--
49
2.25.1
63
2.34.1
50
51
1
GDB single-stepping is now handled generically, which means
1
From: Rui Wang <wangrui@loongson.cn>
2
we don't need to do anything in the wrappers.
3
2
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
3
diff:
4
Imm Before After
5
0000000000000000 addi.w rd, zero, 0 addi.w rd, zero, 0
6
lu52i.d rd, zero, 0
7
00000000fffff800 lu12i.w rd, -1 addi.w rd, zero, -2048
8
ori rd, rd, 2048 lu32i.d rd, 0
9
lu32i.d rd, 0
10
11
Reviewed-by: WANG Xuerui <git@xen0n.name>
12
Signed-off-by: Rui Wang <wangrui@loongson.cn>
13
Message-Id: <20221107144713.845550-1-wangrui@loongson.cn>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
15
---
7
target/riscv/translate.c | 27 +------------------
16
tcg/loongarch64/tcg-target.c.inc | 35 +++++++++++---------------------
8
.../riscv/insn_trans/trans_privileged.c.inc | 4 +--
17
1 file changed, 12 insertions(+), 23 deletions(-)
9
target/riscv/insn_trans/trans_rvi.c.inc | 8 +++---
10
target/riscv/insn_trans/trans_rvv.c.inc | 2 +-
11
4 files changed, 7 insertions(+), 34 deletions(-)
12
18
13
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
19
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
14
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/translate.c
21
--- a/tcg/loongarch64/tcg-target.c.inc
16
+++ b/target/riscv/translate.c
22
+++ b/tcg/loongarch64/tcg-target.c.inc
17
@@ -XXX,XX +XXX,XX @@ static void generate_exception_mtval(DisasContext *ctx, int excp)
23
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
18
ctx->base.is_jmp = DISAS_NORETURN;
24
return true;
19
}
25
}
20
26
21
-static void gen_exception_debug(void)
27
-static bool imm_part_needs_loading(bool high_bits_are_ones,
28
- tcg_target_long part)
22
-{
29
-{
23
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
30
- if (high_bits_are_ones) {
24
-}
31
- return part != -1;
25
-
26
-/* Wrapper around tcg_gen_exit_tb that handles single stepping */
27
-static void exit_tb(DisasContext *ctx)
28
-{
29
- if (ctx->base.singlestep_enabled) {
30
- gen_exception_debug();
31
- } else {
32
- } else {
32
- tcg_gen_exit_tb(NULL, 0);
33
- return part != 0;
33
- }
34
- }
34
-}
35
-}
35
-
36
-
36
-/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */
37
/* Loads a 32-bit immediate into rd, sign-extended. */
37
-static void lookup_and_goto_ptr(DisasContext *ctx)
38
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
38
-{
39
{
39
- if (ctx->base.singlestep_enabled) {
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
40
- gen_exception_debug();
41
tcg_target_long hi12 = sextreg(val, 12, 20);
41
- } else {
42
42
- tcg_gen_lookup_and_goto_ptr();
43
/* Single-instruction cases. */
44
- if (lo == val) {
45
- /* val fits in simm12: addi.w rd, zero, val */
46
- tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
47
- return;
43
- }
48
- }
44
-}
49
- if (0x800 <= val && val <= 0xfff) {
45
-
50
+ if (hi12 == 0) {
46
static void gen_exception_illegal(DisasContext *ctx)
51
/* val fits in uimm12: ori rd, zero, val */
47
{
52
tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
48
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
53
return;
49
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
54
}
50
tcg_gen_exit_tb(ctx->base.tb, n);
55
+ if (hi12 == sextreg(lo, 12, 20)) {
51
} else {
56
+ /* val fits in simm12: addi.w rd, zero, val */
52
tcg_gen_movi_tl(cpu_pc, dest);
57
+ tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
53
- lookup_and_goto_ptr(ctx);
58
+ return;
54
+ tcg_gen_lookup_and_goto_ptr();
59
+ }
60
61
/* High bits must be set; load with lu12i.w + optional ori. */
62
tcg_out_opc_lu12i_w(s, rd, hi12);
63
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
64
65
intptr_t pc_offset;
66
tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
67
- tcg_target_long hi32, hi52;
68
- bool rd_high_bits_are_ones;
69
+ tcg_target_long hi12, hi32, hi52;
70
71
/* Value fits in signed i32. */
72
if (type == TCG_TYPE_I32 || val == (int32_t)val) {
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
74
return;
75
}
76
77
+ hi12 = sextreg(val, 12, 20);
78
hi32 = sextreg(val, 32, 20);
79
hi52 = sextreg(val, 52, 12);
80
81
/* Single cu52i.d case. */
82
- if (ctz64(val) >= 52) {
83
+ if ((hi52 != 0) && (ctz64(val) >= 52)) {
84
tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
85
return;
86
}
87
88
/* Slow path. Initialize the low 32 bits, then concat high bits. */
89
tcg_out_movi_i32(s, rd, val);
90
- rd_high_bits_are_ones = (int32_t)val < 0;
91
92
- if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) {
93
+ /* Load hi32 and hi52 explicitly when they are unexpected values. */
94
+ if (hi32 != sextreg(hi12, 20, 20)) {
95
tcg_out_opc_cu32i_d(s, rd, hi32);
96
- rd_high_bits_are_ones = hi32 < 0;
97
}
98
99
- if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) {
100
+ if (hi52 != sextreg(hi32, 20, 12)) {
101
tcg_out_opc_cu52i_d(s, rd, rd, hi52);
55
}
102
}
56
}
103
}
57
58
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
59
index XXXXXXX..XXXXXXX 100644
60
--- a/target/riscv/insn_trans/trans_privileged.c.inc
61
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
62
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
63
64
if (has_ext(ctx, RVS)) {
65
gen_helper_sret(cpu_pc, cpu_env, cpu_pc);
66
- exit_tb(ctx); /* no chaining */
67
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
68
ctx->base.is_jmp = DISAS_NORETURN;
69
} else {
70
return false;
71
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
72
#ifndef CONFIG_USER_ONLY
73
tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next);
74
gen_helper_mret(cpu_pc, cpu_env, cpu_pc);
75
- exit_tb(ctx); /* no chaining */
76
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
77
ctx->base.is_jmp = DISAS_NORETURN;
78
return true;
79
#else
80
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
81
index XXXXXXX..XXXXXXX 100644
82
--- a/target/riscv/insn_trans/trans_rvi.c.inc
83
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
84
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
85
if (a->rd != 0) {
86
tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn);
87
}
88
-
89
- /* No chaining with JALR. */
90
- lookup_and_goto_ptr(ctx);
91
+ tcg_gen_lookup_and_goto_ptr();
92
93
if (misaligned) {
94
gen_set_label(misaligned);
95
@@ -XXX,XX +XXX,XX @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a)
96
* however we need to end the translation block
97
*/
98
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
99
- exit_tb(ctx);
100
+ tcg_gen_exit_tb(NULL, 0);
101
ctx->base.is_jmp = DISAS_NORETURN;
102
return true;
103
}
104
@@ -XXX,XX +XXX,XX @@ static bool do_csr_post(DisasContext *ctx)
105
{
106
/* We may have changed important cpu state -- exit to main loop. */
107
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
108
- exit_tb(ctx);
109
+ tcg_gen_exit_tb(NULL, 0);
110
ctx->base.is_jmp = DISAS_NORETURN;
111
return true;
112
}
113
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
114
index XXXXXXX..XXXXXXX 100644
115
--- a/target/riscv/insn_trans/trans_rvv.c.inc
116
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
117
@@ -XXX,XX +XXX,XX @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
118
gen_set_gpr(ctx, a->rd, dst);
119
120
tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
121
- lookup_and_goto_ptr(ctx);
122
+ tcg_gen_lookup_and_goto_ptr();
123
ctx->base.is_jmp = DISAS_NORETURN;
124
return true;
125
}
126
--
104
--
127
2.25.1
105
2.34.1
128
129
1
This reverts commit 1b36e4f5a5de585210ea95f2257839c2312be28f.

Despite a comment saying why cpu_common_props cannot be placed in
a file that is compiled once, it was moved anyway. Revert that.

Since then, Property is not defined in hw/core/cpu.h, so it is now
easier to declare a function to install the properties rather than
the Property array itself.

Regenerate with ADDU16I included:

$ cd loongarch-opcodes/scripts/go
$ go run ./genqemutcgdefs > $QEMU/tcg/loongarch64/tcg-insn-defs.c.inc

Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
10
Cc: Eduardo Habkost <ehabkost@redhat.com>
11
Suggested-by: Peter Maydell <peter.maydell@linaro.org>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
9
---
14
include/hw/core/cpu.h | 1 +
10
tcg/loongarch64/tcg-insn-defs.c.inc | 10 +++++++++-
15
cpu.c | 21 +++++++++++++++++++++
11
1 file changed, 9 insertions(+), 1 deletion(-)
16
hw/core/cpu-common.c | 17 +----------------
17
3 files changed, 23 insertions(+), 16 deletions(-)
18
12
19
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
13
diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
20
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/core/cpu.h
15
--- a/tcg/loongarch64/tcg-insn-defs.c.inc
22
+++ b/include/hw/core/cpu.h
16
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
23
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
17
@@ -XXX,XX +XXX,XX @@
24
GCC_FMT_ATTR(2, 3);
18
*
25
19
* This file is auto-generated by genqemutcgdefs from
26
/* $(top_srcdir)/cpu.c */
20
* https://github.com/loongson-community/loongarch-opcodes,
27
+void cpu_class_init_props(DeviceClass *dc);
21
- * from commit 961f0c60f5b63e574d785995600c71ad5413fdc4.
28
void cpu_exec_initfn(CPUState *cpu);
22
+ * from commit 25ca7effe9d88101c1cf96c4005423643386d81f.
29
void cpu_exec_realizefn(CPUState *cpu, Error **errp);
23
* DO NOT EDIT.
30
void cpu_exec_unrealizefn(CPUState *cpu);
24
*/
31
diff --git a/cpu.c b/cpu.c
25
32
index XXXXXXX..XXXXXXX 100644
26
@@ -XXX,XX +XXX,XX @@ typedef enum {
33
--- a/cpu.c
27
OPC_ANDI = 0x03400000,
34
+++ b/cpu.c
28
OPC_ORI = 0x03800000,
35
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
29
OPC_XORI = 0x03c00000,
36
cpu_list_remove(cpu);
30
+ OPC_ADDU16I_D = 0x10000000,
31
OPC_LU12I_W = 0x14000000,
32
OPC_CU32I_D = 0x16000000,
33
OPC_PCADDU2I = 0x18000000,
34
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
35
tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
37
}
36
}
38
37
39
+static Property cpu_common_props[] = {
38
+/* Emits the `addu16i.d d, j, sk16` instruction. */
40
+#ifndef CONFIG_USER_ONLY
39
+static void __attribute__((unused))
41
+ /*
40
+tcg_out_opc_addu16i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
42
+ * Create a memory property for softmmu CPU object,
43
+ * so users can wire up its memory. (This can't go in hw/core/cpu.c
44
+ * because that file is compiled only once for both user-mode
45
+ * and system builds.) The default if no link is set up is to use
46
+ * the system address space.
47
+ */
48
+ DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
49
+ MemoryRegion *),
50
+#endif
51
+ DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
52
+ DEFINE_PROP_END_OF_LIST(),
53
+};
54
+
55
+void cpu_class_init_props(DeviceClass *dc)
56
+{
41
+{
57
+ device_class_set_props(dc, cpu_common_props);
42
+ tcg_out32(s, encode_djsk16_insn(OPC_ADDU16I_D, d, j, sk16));
58
+}
43
+}
59
+
44
+
60
void cpu_exec_initfn(CPUState *cpu)
45
/* Emits the `lu12i.w d, sj20` instruction. */
61
{
46
static void __attribute__((unused))
62
cpu->as = NULL;
47
tcg_out_opc_lu12i_w(TCGContext *s, TCGReg d, int32_t sj20)
63
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/hw/core/cpu-common.c
66
+++ b/hw/core/cpu-common.c
67
@@ -XXX,XX +XXX,XX @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
68
return cpu->cpu_index;
69
}
70
71
-static Property cpu_common_props[] = {
72
-#ifndef CONFIG_USER_ONLY
73
- /* Create a memory property for softmmu CPU object,
74
- * so users can wire up its memory. (This can't go in hw/core/cpu.c
75
- * because that file is compiled only once for both user-mode
76
- * and system builds.) The default if no link is set up is to use
77
- * the system address space.
78
- */
79
- DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
80
- MemoryRegion *),
81
-#endif
82
- DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
83
- DEFINE_PROP_END_OF_LIST(),
84
-};
85
-
86
static void cpu_class_init(ObjectClass *klass, void *data)
87
{
88
DeviceClass *dc = DEVICE_CLASS(klass);
89
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
90
dc->realize = cpu_common_realizefn;
91
dc->unrealize = cpu_common_unrealizefn;
92
dc->reset = cpu_common_reset;
93
- device_class_set_props(dc, cpu_common_props);
94
+ cpu_class_init_props(dc);
95
/*
96
* Reason: CPUs still need special care by board code: wiring up
97
* IRQs, adding reset handlers, halting non-first CPUs, ...
98
--
48
--
99
2.25.1
49
2.34.1
100
50
101
51
1
GDB single-stepping is now handled generically.

Adjust the constraints to allow any int32_t for immediate
addition. Split immediate adds into addu16i + addi, which
covers quite a lot of the immediate space. For the hole in
the middle, load the constant into TMP0 instead.
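
As a rough illustration of that split (a standalone sketch with made-up
names, not the new tcg_out_addi itself): the addend takes the fast path
exactly when it round-trips through a signed 16-bit upper part plus a
signed 12-bit low part:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sign-extend the low <bits> bits of <val>. */
    static int64_t sext(int64_t val, int bits)
    {
        uint64_t v = (uint64_t)val & ((UINT64_C(1) << bits) - 1);
        uint64_t sign = UINT64_C(1) << (bits - 1);
        return (int64_t)((v ^ sign) - sign);
    }

    /* True if imm == (hi16 << 16) + lo12, i.e. ADDU16I + ADDI suffices. */
    static bool addi_fast_path(int64_t imm, int64_t *hi16, int64_t *lo12)
    {
        *lo12 = sext(imm, 12);
        *hi16 = sext((imm - *lo12) >> 16, 16);
        return imm == (*hi16 << 16) + *lo12;
    }

For example, 0x12340678 decomposes as hi16 = 0x1234 and lo12 = 0x678,
while 0x8000 lands in the hole between the two fields and would be
loaded into TMP0 instead.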
Acked-by: Laurent Vivier <laurent@vivier.eu>
6
Reviewed-by: WANG Xuerui <git@xen0n.name>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
8
---
6
target/m68k/translate.c | 44 +++++++++--------------------------------
9
tcg/loongarch64/tcg-target-con-set.h | 4 +-
7
1 file changed, 9 insertions(+), 35 deletions(-)
10
tcg/loongarch64/tcg-target-con-str.h | 2 +-
11
tcg/loongarch64/tcg-target.c.inc | 57 ++++++++++++++++++++++++----
12
3 files changed, 53 insertions(+), 10 deletions(-)
8
13
9
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
14
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
10
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
11
--- a/target/m68k/translate.c
16
--- a/tcg/loongarch64/tcg-target-con-set.h
12
+++ b/target/m68k/translate.c
17
+++ b/tcg/loongarch64/tcg-target-con-set.h
13
@@ -XXX,XX +XXX,XX @@ static void do_writebacks(DisasContext *s)
18
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, L)
19
C_O1_I2(r, r, rC)
20
C_O1_I2(r, r, ri)
21
C_O1_I2(r, r, rI)
22
+C_O1_I2(r, r, rJ)
23
C_O1_I2(r, r, rU)
24
C_O1_I2(r, r, rW)
25
C_O1_I2(r, r, rZ)
26
C_O1_I2(r, 0, rZ)
27
-C_O1_I2(r, rZ, rN)
28
+C_O1_I2(r, rZ, ri)
29
+C_O1_I2(r, rZ, rJ)
30
C_O1_I2(r, rZ, rZ)
31
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/loongarch64/tcg-target-con-str.h
34
+++ b/tcg/loongarch64/tcg-target-con-str.h
35
@@ -XXX,XX +XXX,XX @@ REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
36
* CONST(letter, TCG_CT_CONST_* bit set)
37
*/
38
CONST('I', TCG_CT_CONST_S12)
39
-CONST('N', TCG_CT_CONST_N12)
40
+CONST('J', TCG_CT_CONST_S32)
41
CONST('U', TCG_CT_CONST_U12)
42
CONST('Z', TCG_CT_CONST_ZERO)
43
CONST('C', TCG_CT_CONST_C12)
44
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/loongarch64/tcg-target.c.inc
47
+++ b/tcg/loongarch64/tcg-target.c.inc
48
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[] = {
49
50
#define TCG_CT_CONST_ZERO 0x100
51
#define TCG_CT_CONST_S12 0x200
52
-#define TCG_CT_CONST_N12 0x400
53
+#define TCG_CT_CONST_S32 0x400
54
#define TCG_CT_CONST_U12 0x800
55
#define TCG_CT_CONST_C12 0x1000
56
#define TCG_CT_CONST_WSZ 0x2000
57
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
58
if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
59
return true;
60
}
61
- if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
62
+ if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
63
return true;
64
}
65
if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
14
}
67
}
15
}
68
}
16
69
17
-static bool is_singlestepping(DisasContext *s)
70
+static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
18
-{
71
+ TCGReg rs, tcg_target_long imm)
19
- /*
72
+{
20
- * Return true if we are singlestepping either because of
73
+ tcg_target_long lo12 = sextreg(imm, 0, 12);
21
- * architectural singlestep or QEMU gdbstub singlestep. This does
74
+ tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);
22
- * not include the command line '-singlestep' mode which is rather
75
+
23
- * misnamed as it only means "one instruction per TB" and doesn't
76
+ /*
24
- * affect the code we generate.
77
+ * Note that there's a hole in between hi16 and lo12:
25
- */
78
+ *
26
- return s->base.singlestep_enabled || s->ss_active;
79
+ * 3 2 1 0
27
-}
80
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
28
-
81
+ * ...+-------------------------------+-------+-----------------------+
29
/* is_jmp field values */
82
+ * | hi16 | | lo12 |
30
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
83
+ * ...+-------------------------------+-------+-----------------------+
31
#define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */
84
+ *
32
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *s, uint32_t dest, int nr)
85
+ * For bits within that hole, it's more efficient to use LU12I and ADD.
33
s->base.is_jmp = DISAS_NORETURN;
86
+ */
34
}
87
+ if (imm == (hi16 << 16) + lo12) {
35
88
+ if (hi16) {
36
-static void gen_singlestep_exception(DisasContext *s)
89
+ tcg_out_opc_addu16i_d(s, rd, rs, hi16);
37
-{
90
+ rs = rd;
38
- /*
91
+ }
39
- * Generate the right kind of exception for singlestep, which is
92
+ if (type == TCG_TYPE_I32) {
40
- * either the architectural singlestep or EXCP_DEBUG for QEMU's
93
+ tcg_out_opc_addi_w(s, rd, rs, lo12);
41
- * gdb singlestepping.
94
+ } else if (lo12) {
42
- */
95
+ tcg_out_opc_addi_d(s, rd, rs, lo12);
43
- if (s->ss_active) {
96
+ } else {
44
- gen_raise_exception(EXCP_TRACE);
97
+ tcg_out_mov(s, type, rd, rs);
45
- } else {
98
+ }
46
- gen_raise_exception(EXCP_DEBUG);
99
+ } else {
47
- }
100
+ tcg_out_movi(s, type, TCG_REG_TMP0, imm);
48
-}
101
+ if (type == TCG_TYPE_I32) {
49
-
102
+ tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
50
static inline void gen_addr_fault(DisasContext *s)
103
+ } else {
104
+ tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
105
+ }
106
+ }
107
+}
108
+
109
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
51
{
110
{
52
gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
111
tcg_out_opc_andi(s, ret, arg, 0xff);
53
@@ -XXX,XX +XXX,XX @@ static void gen_exit_tb(DisasContext *s)
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
54
/* Generate a jump to an immediate address. */
113
55
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
114
case INDEX_op_add_i32:
56
{
115
if (c2) {
57
- if (unlikely(is_singlestepping(s))) {
116
- tcg_out_opc_addi_w(s, a0, a1, a2);
58
+ if (unlikely(s->ss_active)) {
117
+ tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
59
update_cc_op(s);
60
tcg_gen_movi_i32(QREG_PC, dest);
61
- gen_singlestep_exception(s);
62
+ gen_raise_exception(EXCP_TRACE);
63
} else if (translator_use_goto_tb(&s->base, dest)) {
64
tcg_gen_goto_tb(n);
65
tcg_gen_movi_i32(QREG_PC, dest);
66
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
67
68
dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
69
/* If architectural single step active, limit to 1 */
70
- if (is_singlestepping(dc)) {
71
+ if (dc->ss_active) {
72
dc->base.max_insns = 1;
73
}
74
}
75
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
76
break;
77
case DISAS_TOO_MANY:
78
update_cc_op(dc);
79
- if (is_singlestepping(dc)) {
80
+ if (dc->ss_active) {
81
tcg_gen_movi_i32(QREG_PC, dc->pc);
82
- gen_singlestep_exception(dc);
83
+ gen_raise_exception(EXCP_TRACE);
84
} else {
118
} else {
85
gen_jmp_tb(dc, 0, dc->pc);
119
tcg_out_opc_add_w(s, a0, a1, a2);
86
}
120
}
87
break;
121
break;
88
case DISAS_JUMP:
122
case INDEX_op_add_i64:
89
/* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
123
if (c2) {
90
- if (is_singlestepping(dc)) {
124
- tcg_out_opc_addi_d(s, a0, a1, a2);
91
- gen_singlestep_exception(dc);
125
+ tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
92
+ if (dc->ss_active) {
93
+ gen_raise_exception(EXCP_TRACE);
94
} else {
126
} else {
95
tcg_gen_lookup_and_goto_ptr();
127
tcg_out_opc_add_d(s, a0, a1, a2);
96
}
128
}
97
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
129
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
98
* We updated CC_OP and PC in gen_exit_tb, but also modified
130
99
* other state that may require returning to the main loop.
131
case INDEX_op_sub_i32:
100
*/
132
if (c2) {
101
- if (is_singlestepping(dc)) {
133
- tcg_out_opc_addi_w(s, a0, a1, -a2);
102
- gen_singlestep_exception(dc);
134
+ tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
103
+ if (dc->ss_active) {
104
+ gen_raise_exception(EXCP_TRACE);
105
} else {
135
} else {
106
tcg_gen_exit_tb(NULL, 0);
136
tcg_out_opc_sub_w(s, a0, a1, a2);
107
}
137
}
138
break;
139
case INDEX_op_sub_i64:
140
if (c2) {
141
- tcg_out_opc_addi_d(s, a0, a1, -a2);
142
+ tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
143
} else {
144
tcg_out_opc_sub_d(s, a0, a1, a2);
145
}
146
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
147
return C_O1_I2(r, r, ri);
148
149
case INDEX_op_add_i32:
150
+ return C_O1_I2(r, r, ri);
151
case INDEX_op_add_i64:
152
- return C_O1_I2(r, r, rI);
153
+ return C_O1_I2(r, r, rJ);
154
155
case INDEX_op_and_i32:
156
case INDEX_op_and_i64:
157
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
158
return C_O1_I2(r, 0, rZ);
159
160
case INDEX_op_sub_i32:
161
+ return C_O1_I2(r, rZ, ri);
162
case INDEX_op_sub_i64:
163
- return C_O1_I2(r, rZ, rN);
164
+ return C_O1_I2(r, rZ, rJ);
165
166
case INDEX_op_mul_i32:
167
case INDEX_op_mul_i64:
108
--
168
--
109
2.25.1
169
2.34.1
110
111
1
We have already set DISAS_NORETURN in generate_exception,
1
Split out a helper function, tcg_out_setcond_int, which
2
which makes the exit_tb unreachable.
2
does not always produce the complete boolean result, but
3
returns a set of flags to do so.
3
4
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Accept all int32_t as constant input, so that LE/GT can
6
adjust the constant to LT.
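
As an illustration of that flags scheme (hypothetical bit values and
names, not the emitter itself), the caller finishes the boolean from
whatever intermediate value the helper returned:

    #include <stdint.h>
    #include <stdio.h>

    #define SETCOND_INV  (1u << 8)   /* result must be inverted          */
    #define SETCOND_NEZ  (1u << 9)   /* result is zero/non-zero, not 0/1 */

    /* Compute the value the final instructions would leave in 'ret'. */
    static uint64_t finish_setcond(unsigned tmpflags, uint64_t tmp)
    {
        switch (tmpflags & (SETCOND_INV | SETCOND_NEZ)) {
        case 0:                          return tmp;       /* already boolean */
        case SETCOND_INV:                return tmp ^ 1;   /* xori ret,tmp,1  */
        case SETCOND_NEZ:                return tmp != 0;  /* sltu ret,0,tmp  */
        case SETCOND_INV | SETCOND_NEZ:  return tmp == 0;  /* sltui ret,tmp,1 */
        }
        return 0;
    }

    int main(void)
    {
        /* EQ is handled as NE (tmp = a ^ b) plus the INV flag. */
        uint64_t a = 42, b = 42;
        printf("%d\n", (int)finish_setcond(SETCOND_NEZ | SETCOND_INV, a ^ b));
        return 0;
    }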
7
8
Reviewed-by: WANG Xuerui <git@xen0n.name>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
10
---
7
target/riscv/insn_trans/trans_privileged.c.inc | 6 +-----
11
tcg/loongarch64/tcg-target.c.inc | 165 +++++++++++++++++++++----------
8
1 file changed, 1 insertion(+), 5 deletions(-)
12
1 file changed, 115 insertions(+), 50 deletions(-)
9
13
10
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
14
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
15
index XXXXXXX..XXXXXXX 100644
12
--- a/target/riscv/insn_trans/trans_privileged.c.inc
16
--- a/tcg/loongarch64/tcg-target.c.inc
13
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
17
+++ b/tcg/loongarch64/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
18
@@ -XXX,XX +XXX,XX @@ static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
15
{
19
tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
16
/* always generates U-level ECALL, fixed in do_interrupt handler */
17
generate_exception(ctx, RISCV_EXCP_U_ECALL);
18
- exit_tb(ctx); /* no chaining */
19
- ctx->base.is_jmp = DISAS_NORETURN;
20
return true;
21
}
20
}
22
21
23
@@ -XXX,XX +XXX,XX @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
22
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
24
post = opcode_at(&ctx->base, post_addr);
23
- TCGReg arg1, TCGReg arg2, bool c2)
24
-{
25
- TCGReg tmp;
26
+#define SETCOND_INV TCG_TARGET_NB_REGS
27
+#define SETCOND_NEZ (SETCOND_INV << 1)
28
+#define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
29
30
- if (c2) {
31
- tcg_debug_assert(arg2 == 0);
32
+static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
33
+ TCGReg arg1, tcg_target_long arg2, bool c2)
34
+{
35
+ int flags = 0;
36
+
37
+ switch (cond) {
38
+ case TCG_COND_EQ: /* -> NE */
39
+ case TCG_COND_GE: /* -> LT */
40
+ case TCG_COND_GEU: /* -> LTU */
41
+ case TCG_COND_GT: /* -> LE */
42
+ case TCG_COND_GTU: /* -> LEU */
43
+ cond = tcg_invert_cond(cond);
44
+ flags ^= SETCOND_INV;
45
+ break;
46
+ default:
47
+ break;
25
}
48
}
26
49
27
- if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
50
switch (cond) {
28
+ if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
51
- case TCG_COND_EQ:
29
generate_exception(ctx, RISCV_EXCP_SEMIHOST);
52
- if (c2) {
30
} else {
53
- tmp = arg1;
31
generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
54
- } else {
55
- tcg_out_opc_sub_d(s, ret, arg1, arg2);
56
- tmp = ret;
57
- }
58
- tcg_out_opc_sltui(s, ret, tmp, 1);
59
- break;
60
- case TCG_COND_NE:
61
- if (c2) {
62
- tmp = arg1;
63
- } else {
64
- tcg_out_opc_sub_d(s, ret, arg1, arg2);
65
- tmp = ret;
66
- }
67
- tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
68
- break;
69
- case TCG_COND_LT:
70
- tcg_out_opc_slt(s, ret, arg1, arg2);
71
- break;
72
- case TCG_COND_GE:
73
- tcg_out_opc_slt(s, ret, arg1, arg2);
74
- tcg_out_opc_xori(s, ret, ret, 1);
75
- break;
76
case TCG_COND_LE:
77
- tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false);
78
- break;
79
- case TCG_COND_GT:
80
- tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false);
81
- break;
82
- case TCG_COND_LTU:
83
- tcg_out_opc_sltu(s, ret, arg1, arg2);
84
- break;
85
- case TCG_COND_GEU:
86
- tcg_out_opc_sltu(s, ret, arg1, arg2);
87
- tcg_out_opc_xori(s, ret, ret, 1);
88
- break;
89
case TCG_COND_LEU:
90
- tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false);
91
+ /*
92
+ * If we have a constant input, the most efficient way to implement
93
+ * LE is by adding 1 and using LT. Watch out for wrap around for LEU.
94
+ * We don't need to care for this for LE because the constant input
95
+ * is still constrained to int32_t, and INT32_MAX+1 is representable
96
+ * in the 64-bit temporary register.
97
+ */
98
+ if (c2) {
99
+ if (cond == TCG_COND_LEU) {
100
+ /* unsigned <= -1 is true */
101
+ if (arg2 == -1) {
102
+ tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
103
+ return ret;
104
+ }
105
+ cond = TCG_COND_LTU;
106
+ } else {
107
+ cond = TCG_COND_LT;
108
+ }
109
+ arg2 += 1;
110
+ } else {
111
+ TCGReg tmp = arg2;
112
+ arg2 = arg1;
113
+ arg1 = tmp;
114
+ cond = tcg_swap_cond(cond); /* LE -> GE */
115
+ cond = tcg_invert_cond(cond); /* GE -> LT */
116
+ flags ^= SETCOND_INV;
117
+ }
118
break;
119
- case TCG_COND_GTU:
120
- tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false);
121
+ default:
122
break;
123
+ }
124
+
125
+ switch (cond) {
126
+ case TCG_COND_NE:
127
+ flags |= SETCOND_NEZ;
128
+ if (!c2) {
129
+ tcg_out_opc_xor(s, ret, arg1, arg2);
130
+ } else if (arg2 == 0) {
131
+ ret = arg1;
132
+ } else if (arg2 >= 0 && arg2 <= 0xfff) {
133
+ tcg_out_opc_xori(s, ret, arg1, arg2);
134
+ } else {
135
+ tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
136
+ }
137
+ break;
138
+
139
+ case TCG_COND_LT:
140
+ case TCG_COND_LTU:
141
+ if (c2) {
142
+ if (arg2 >= -0x800 && arg2 <= 0x7ff) {
143
+ if (cond == TCG_COND_LT) {
144
+ tcg_out_opc_slti(s, ret, arg1, arg2);
145
+ } else {
146
+ tcg_out_opc_sltui(s, ret, arg1, arg2);
147
+ }
148
+ break;
149
+ }
150
+ tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
151
+ arg2 = TCG_REG_TMP0;
152
+ }
153
+ if (cond == TCG_COND_LT) {
154
+ tcg_out_opc_slt(s, ret, arg1, arg2);
155
+ } else {
156
+ tcg_out_opc_sltu(s, ret, arg1, arg2);
157
+ }
158
+ break;
159
+
160
default:
161
g_assert_not_reached();
162
break;
32
}
163
}
33
- exit_tb(ctx); /* no chaining */
164
+
34
- ctx->base.is_jmp = DISAS_NORETURN;
165
+ return ret | flags;
35
return true;
166
+}
167
+
168
+static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
169
+ TCGReg arg1, tcg_target_long arg2, bool c2)
170
+{
171
+ int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
172
+
173
+ if (tmpflags != ret) {
174
+ TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
175
+
176
+ switch (tmpflags & SETCOND_FLAGS) {
177
+ case SETCOND_INV:
178
+ /* Intermediate result is boolean: simply invert. */
179
+ tcg_out_opc_xori(s, ret, tmp, 1);
180
+ break;
181
+ case SETCOND_NEZ:
182
+ /* Intermediate result is zero/non-zero: test != 0. */
183
+ tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
184
+ break;
185
+ case SETCOND_NEZ | SETCOND_INV:
186
+ /* Intermediate result is zero/non-zero: test == 0. */
187
+ tcg_out_opc_sltui(s, ret, tmp, 1);
188
+ break;
189
+ default:
190
+ g_assert_not_reached();
191
+ }
192
+ }
36
}
193
}
37
194
195
/*
196
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
197
case INDEX_op_ctz_i64:
198
return C_O1_I2(r, r, rW);
199
200
- case INDEX_op_setcond_i32:
201
- case INDEX_op_setcond_i64:
202
- return C_O1_I2(r, r, rZ);
203
-
204
case INDEX_op_deposit_i32:
205
case INDEX_op_deposit_i64:
206
/* Must deposit into the same register as input */
207
return C_O1_I2(r, 0, rZ);
208
209
case INDEX_op_sub_i32:
210
+ case INDEX_op_setcond_i32:
211
return C_O1_I2(r, rZ, ri);
212
case INDEX_op_sub_i64:
213
+ case INDEX_op_setcond_i64:
214
return C_O1_I2(r, rZ, rJ);
215
216
case INDEX_op_mul_i32:
38
--
217
--
39
2.25.1
218
2.34.1
40
41
diff view generated by jsdifflib
1
GDB single-stepping is now handled generically.
1
Reviewed-by: WANG Xuerui <git@xen0n.name>
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
3
---
6
target/hppa/translate.c | 17 ++++-------------
4
tcg/loongarch64/tcg-target-con-set.h | 1 +
7
1 file changed, 4 insertions(+), 13 deletions(-)
5
tcg/loongarch64/tcg-target.h | 4 ++--
6
tcg/loongarch64/tcg-target.c.inc | 33 ++++++++++++++++++++++++++++
7
3 files changed, 36 insertions(+), 2 deletions(-)
8
8
9
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
9
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hppa/translate.c
11
--- a/tcg/loongarch64/tcg-target-con-set.h
12
+++ b/target/hppa/translate.c
12
+++ b/tcg/loongarch64/tcg-target-con-set.h
13
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *ctx, int which,
13
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
14
} else {
14
C_O1_I2(r, rZ, ri)
15
copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
15
C_O1_I2(r, rZ, rJ)
16
copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
16
C_O1_I2(r, rZ, rZ)
17
- if (ctx->base.singlestep_enabled) {
17
+C_O1_I4(r, rZ, rJ, rZ, rZ)
18
- gen_excp_1(EXCP_DEBUG);
18
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
19
- } else {
19
index XXXXXXX..XXXXXXX 100644
20
- tcg_gen_lookup_and_goto_ptr();
20
--- a/tcg/loongarch64/tcg-target.h
21
- }
21
+++ b/tcg/loongarch64/tcg-target.h
22
+ tcg_gen_lookup_and_goto_ptr();
22
@@ -XXX,XX +XXX,XX @@ typedef enum {
23
#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
24
25
/* optional instructions */
26
-#define TCG_TARGET_HAS_movcond_i32 0
27
+#define TCG_TARGET_HAS_movcond_i32 1
28
#define TCG_TARGET_HAS_div_i32 1
29
#define TCG_TARGET_HAS_rem_i32 1
30
#define TCG_TARGET_HAS_div2_i32 0
31
@@ -XXX,XX +XXX,XX @@ typedef enum {
32
#define TCG_TARGET_HAS_qemu_st8_i32 0
33
34
/* 64-bit operations */
35
-#define TCG_TARGET_HAS_movcond_i64 0
36
+#define TCG_TARGET_HAS_movcond_i64 1
37
#define TCG_TARGET_HAS_div_i64 1
38
#define TCG_TARGET_HAS_rem_i64 1
39
#define TCG_TARGET_HAS_div2_i64 0
40
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
41
index XXXXXXX..XXXXXXX 100644
42
--- a/tcg/loongarch64/tcg-target.c.inc
43
+++ b/tcg/loongarch64/tcg-target.c.inc
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
23
}
45
}
24
}
46
}
25
47
26
@@ -XXX,XX +XXX,XX @@ static bool do_rfi(DisasContext *ctx, bool rfi_r)
48
+static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
27
gen_helper_rfi(cpu_env);
49
+ TCGReg c1, tcg_target_long c2, bool const2,
50
+ TCGReg v1, TCGReg v2)
51
+{
52
+ int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
53
+ TCGReg t;
54
+
55
+ /* Standardize the test below to t != 0. */
56
+ if (tmpflags & SETCOND_INV) {
57
+ t = v1, v1 = v2, v2 = t;
58
+ }
59
+
60
+ t = tmpflags & ~SETCOND_FLAGS;
61
+ if (v1 == TCG_REG_ZERO) {
62
+ tcg_out_opc_masknez(s, ret, v2, t);
63
+ } else if (v2 == TCG_REG_ZERO) {
64
+ tcg_out_opc_maskeqz(s, ret, v1, t);
65
+ } else {
66
+ tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
67
+ tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
68
+ tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
69
+ }
70
+}
71
+
72
/*
73
* Branch helpers
74
*/
75
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
76
tcg_out_setcond(s, args[3], a0, a1, a2, c2);
77
break;
78
79
+ case INDEX_op_movcond_i32:
80
+ case INDEX_op_movcond_i64:
81
+ tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
82
+ break;
83
+
84
case INDEX_op_ld8s_i32:
85
case INDEX_op_ld8s_i64:
86
tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
87
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
88
case INDEX_op_remu_i64:
89
return C_O1_I2(r, rZ, rZ);
90
91
+ case INDEX_op_movcond_i32:
92
+ case INDEX_op_movcond_i64:
93
+ return C_O1_I4(r, rZ, rJ, rZ, rZ);
94
+
95
default:
96
g_assert_not_reached();
28
}
97
}
29
/* Exit the TB to recognize new interrupts. */
30
- if (ctx->base.singlestep_enabled) {
31
- gen_excp_1(EXCP_DEBUG);
32
- } else {
33
- tcg_gen_exit_tb(NULL, 0);
34
- }
35
+ tcg_gen_exit_tb(NULL, 0);
36
ctx->base.is_jmp = DISAS_NORETURN;
37
38
return nullify_end(ctx);
39
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
40
nullify_save(ctx);
41
/* FALLTHRU */
42
case DISAS_IAQ_N_UPDATED:
43
- if (ctx->base.singlestep_enabled) {
44
- gen_excp_1(EXCP_DEBUG);
45
- } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
46
+ if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
47
tcg_gen_lookup_and_goto_ptr();
48
+ break;
49
}
50
/* FALLTHRU */
51
case DISAS_EXIT:
52
--
98
--
53
2.25.1
99
2.34.1
54
55
1
As per an ancient comment in mips_tr_translate_insn about the
1
Take the w^x split into account when computing the
2
expectations of gdb, when restarting the insn in a delay slot
2
pc-relative distance to an absolute pointer.
3
we also re-execute the branch. Which means that we are
4
expected to execute two insns in this case.
5
3
6
This has been broken since 8b86d6d2580, where we forced max_insns
4
Reviewed-by: WANG Xuerui <git@xen0n.name>
7
to 1 while single-stepping. This resulted in an exit from the
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
translator loop after the branch but before the delay slot is
9
translated.
10
11
Increase the max_insns to 2 for this case. In addition, bypass
12
the end-of-page check, for when the branch itself ends the page.
13
14
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
---
7
---
17
target/mips/tcg/translate.c | 25 ++++++++++++++++---------
8
tcg/loongarch64/tcg-target.c.inc | 2 +-
18
1 file changed, 16 insertions(+), 9 deletions(-)
9
1 file changed, 1 insertion(+), 1 deletion(-)
19
10
20
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
11
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
22
--- a/target/mips/tcg/translate.c
13
--- a/tcg/loongarch64/tcg-target.c.inc
23
+++ b/target/mips/tcg/translate.c
14
+++ b/tcg/loongarch64/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
15
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
25
ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 |
16
intptr_t imm12 = sextreg(offset, 0, 12);
26
INSN_LOONGSON3A)) ? MO_UNALN : MO_ALIGN;
17
27
18
if (offset != imm12) {
28
+ /*
19
- intptr_t diff = offset - (uintptr_t)s->code_ptr;
29
+ * Execute a branch and its delay slot as a single instruction.
20
+ intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
30
+ * This is what GDB expects and is consistent with what the
21
31
+ * hardware does (e.g. if a delay slot instruction faults, the
22
if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
32
+ * reported PC is the PC of the branch).
23
imm12 = sextreg(diff, 0, 12);
33
+ */
34
+ if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) {
35
+ ctx->base.max_insns = 2;
36
+ }
37
+
38
LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
39
ctx->hflags);
40
}
41
@@ -XXX,XX +XXX,XX @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
42
if (ctx->base.is_jmp != DISAS_NEXT) {
43
return;
44
}
45
+
46
/*
47
- * Execute a branch and its delay slot as a single instruction.
48
- * This is what GDB expects and is consistent with what the
49
- * hardware does (e.g. if a delay slot instruction faults, the
50
- * reported PC is the PC of the branch).
51
+ * End the TB on (most) page crossings.
52
+ * See mips_tr_init_disas_context about single-stepping a branch
53
+ * together with its delay slot.
54
*/
55
- if (ctx->base.singlestep_enabled &&
56
- (ctx->hflags & MIPS_HFLAG_BMASK) == 0) {
57
- ctx->base.is_jmp = DISAS_TOO_MANY;
58
- }
59
- if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) {
60
+ if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE
61
+ && !ctx->base.singlestep_enabled) {
62
ctx->base.is_jmp = DISAS_TOO_MANY;
63
}
64
}
65
--
24
--
66
2.25.1
25
2.34.1
67
26
68
27
1
GDB single-stepping is now handled generically.
1
The old implementation replaces two insns, swapping between
2
2
3
b <dest>
4
nop
5
and
6
pcaddu18i tmp, <dest>
7
jirl zero, tmp, <dest> & 0xffff
8
9
There is a race condition in which a thread could be stopped at
10
the jirl, i.e. with the top of the address loaded, and when
11
restarted we have re-linked to a different TB, so that the top
12
half no longer matches the bottom half.
13
14
Note that while we never directly re-link to a different TB, we
15
can link, unlink, and link again all while the stopped thread
16
remains stopped.
17
18
The new implementation replaces only one insn, swapping between
19
20
b <dest>
21
and
22
pcadd tmp, <jmp_addr>
23
24
falling through to load the address from tmp, and branch.
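
(For illustration, a sketch of why the single-word rewrite is safe; the
types and names here are made up, only the idea matches the patch: the
patched site is one aligned 32-bit word, so a stopped thread can only
ever observe the old or the new instruction, never a mismatched pair.)

    #include <stdatomic.h>
    #include <stdint.h>

    typedef uint32_t insn_t;

    static void retarget_goto_tb(_Atomic insn_t *patch_site,
                                 insn_t direct_branch,   /* b <dest>         */
                                 insn_t pcadd_to_slot,   /* pcadd tmp,<slot> */
                                 int dest_in_range)
    {
        /* A single aligned 32-bit store, atomic with respect to any
         * thread executing this code. */
        atomic_store_explicit(patch_site,
                              dest_in_range ? direct_branch : pcadd_to_slot,
                              memory_order_relaxed);
        /* ...followed by an icache flush of those 4 bytes, as in the patch. */
    }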
25
26
Reviewed-by: WANG Xuerui <git@xen0n.name>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
27
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
28
---
5
target/arm/translate-a64.c | 10 ++--------
29
tcg/loongarch64/tcg-target.h | 7 +---
6
target/arm/translate.c | 36 ++++++------------------------------
30
tcg/loongarch64/tcg-target.c.inc | 72 ++++++++++++++------------------
7
2 files changed, 8 insertions(+), 38 deletions(-)
31
2 files changed, 33 insertions(+), 46 deletions(-)
8
32
9
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
33
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
10
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
11
--- a/target/arm/translate-a64.c
35
--- a/tcg/loongarch64/tcg-target.h
12
+++ b/target/arm/translate-a64.c
36
+++ b/tcg/loongarch64/tcg-target.h
13
@@ -XXX,XX +XXX,XX @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
37
@@ -XXX,XX +XXX,XX @@
14
gen_a64_set_pc_im(dest);
38
15
if (s->ss_active) {
39
#define TCG_TARGET_INSN_UNIT_SIZE 4
16
gen_step_complete_exception(s);
40
#define TCG_TARGET_NB_REGS 32
17
- } else if (s->base.singlestep_enabled) {
41
-/*
18
- gen_exception_internal(EXCP_DEBUG);
42
- * PCADDU18I + JIRL sequence can give 20 + 16 + 2 = 38 bits
19
} else {
43
- * signed offset, which is +/- 128 GiB.
20
tcg_gen_lookup_and_goto_ptr();
44
- */
21
s->base.is_jmp = DISAS_NORETURN;
45
-#define MAX_CODE_GEN_BUFFER_SIZE (128 * GiB)
22
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
46
+
23
{
47
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
24
DisasContext *dc = container_of(dcbase, DisasContext, base);
48
25
49
typedef enum {
26
- if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) {
50
TCG_REG_ZERO,
27
+ if (unlikely(dc->ss_active)) {
51
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
28
/* Note that this means single stepping WFI doesn't halt the CPU.
29
* For conditional branch insns this is harmless unreachable code as
30
* gen_goto_tb() has already handled emitting the debug exception
31
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
32
/* fall through */
33
case DISAS_EXIT:
34
case DISAS_JUMP:
35
- if (dc->base.singlestep_enabled) {
36
- gen_exception_internal(EXCP_DEBUG);
37
- } else {
38
- gen_step_complete_exception(dc);
39
- }
40
+ gen_step_complete_exception(dc);
41
break;
42
case DISAS_NORETURN:
43
break;
44
diff --git a/target/arm/translate.c b/target/arm/translate.c
45
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
46
--- a/target/arm/translate.c
53
--- a/tcg/loongarch64/tcg-target.c.inc
47
+++ b/target/arm/translate.c
54
+++ b/tcg/loongarch64/tcg-target.c.inc
48
@@ -XXX,XX +XXX,XX @@ static void gen_exception_internal(int excp)
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
49
tcg_temp_free_i32(tcg_excp);
56
#endif
50
}
57
}
51
58
52
-static void gen_step_complete_exception(DisasContext *s)
59
-/* LoongArch uses `andi zero, zero, 0` as NOP. */
53
+static void gen_singlestep_exception(DisasContext *s)
60
-#define NOP OPC_ANDI
54
{
61
-static void tcg_out_nop(TCGContext *s)
55
/* We just completed step of an insn. Move from Active-not-pending
56
* to Active-pending, and then also take the swstep exception.
57
@@ -XXX,XX +XXX,XX @@ static void gen_step_complete_exception(DisasContext *s)
58
s->base.is_jmp = DISAS_NORETURN;
59
}
60
61
-static void gen_singlestep_exception(DisasContext *s)
62
-{
62
-{
63
- /* Generate the right kind of exception for singlestep, which is
63
- tcg_out32(s, NOP);
64
- * either the architectural singlestep or EXCP_DEBUG for QEMU's
65
- * gdb singlestepping.
66
- */
67
- if (s->ss_active) {
68
- gen_step_complete_exception(s);
69
- } else {
70
- gen_exception_internal(EXCP_DEBUG);
71
- }
72
-}
64
-}
73
-
65
-
74
-static inline bool is_singlestepping(DisasContext *s)
66
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
67
- uintptr_t jmp_rx, uintptr_t jmp_rw)
75
-{
68
-{
76
- /* Return true if we are singlestepping either because of
69
- tcg_insn_unit i1, i2;
77
- * architectural singlestep or QEMU gdbstub singlestep. This does
70
- ptrdiff_t upper, lower;
78
- * not include the command line '-singlestep' mode which is rather
71
- uintptr_t addr = tb->jmp_target_addr[n];
79
- * misnamed as it only means "one instruction per TB" and doesn't
72
- ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2;
80
- * affect the code we generate.
73
-
81
- */
74
- if (offset == sextreg(offset, 0, 26)) {
82
- return s->base.singlestep_enabled || s->ss_active;
75
- i1 = encode_sd10k16_insn(OPC_B, offset);
76
- i2 = NOP;
77
- } else {
78
- tcg_debug_assert(offset == sextreg(offset, 0, 36));
79
- lower = (int16_t)offset;
80
- upper = (offset - lower) >> 16;
81
-
82
- i1 = encode_dsj20_insn(OPC_PCADDU18I, TCG_REG_TMP0, upper);
83
- i2 = encode_djsk16_insn(OPC_JIRL, TCG_REG_ZERO, TCG_REG_TMP0, lower);
84
- }
85
- uint64_t pair = ((uint64_t)i2 << 32) | i1;
86
- qatomic_set((uint64_t *)jmp_rw, pair);
87
- flush_idcache_range(jmp_rx, jmp_rw, 8);
83
-}
88
-}
84
-
89
-
85
void clear_eci_state(DisasContext *s)
90
/*
91
* Entry-points
92
*/
93
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
94
static void tcg_out_goto_tb(TCGContext *s, int which)
86
{
95
{
87
/*
96
/*
88
@@ -XXX,XX +XXX,XX @@ static inline void gen_bx_excret_final_code(DisasContext *s)
97
- * Ensure that patch area is 8-byte aligned so that an
89
/* Is the new PC value in the magic range indicating exception return? */
98
- * atomic write can be used to patch the target address.
90
tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
99
+ * Direct branch, or load indirect address, to be patched
91
/* No: end the TB as we would for a DISAS_JMP */
100
+ * by tb_target_set_jmp_target. Check indirect load offset
92
- if (is_singlestepping(s)) {
101
+ * in range early, regardless of direct branch distance,
93
+ if (s->ss_active) {
102
+ * via assert within tcg_out_opc_pcaddu2i.
94
gen_singlestep_exception(s);
103
*/
95
} else {
104
- if ((uintptr_t)s->code_ptr & 7) {
96
tcg_gen_exit_tb(NULL, 0);
105
- tcg_out_nop(s);
97
@@ -XXX,XX +XXX,XX @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
106
- }
98
/* Jump, specifying which TB number to use if we gen_goto_tb() */
107
+ uintptr_t i_addr = get_jmp_target_addr(s, which);
99
static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
108
+ intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);
100
{
109
+
101
- if (unlikely(is_singlestepping(s))) {
110
set_jmp_insn_offset(s, which);
102
+ if (unlikely(s->ss_active)) {
111
- /*
103
/* An indirect jump so that we still trigger the debug exception. */
112
- * actual branch destination will be patched by
104
gen_set_pc_im(s, dest);
113
- * tb_target_set_jmp_target later
105
s->base.is_jmp = DISAS_JUMP;
114
- */
106
@@ -XXX,XX +XXX,XX @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
115
- tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
107
dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;
116
+ tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);
108
117
+
109
/* If architectural single step active, limit to 1. */
118
+ /* Finish the load and indirect branch. */
110
- if (is_singlestepping(dc)) {
119
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
111
+ if (dc->ss_active) {
120
tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
112
dc->base.max_insns = 1;
121
set_jmp_reset_offset(s, which);
113
}
122
}
114
123
115
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
124
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
116
* insn codepath itself.
125
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
117
*/
126
+{
118
gen_bx_excret_final_code(dc);
127
+ uintptr_t d_addr = tb->jmp_target_addr[n];
119
- } else if (unlikely(is_singlestepping(dc))) {
128
+ ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
120
+ } else if (unlikely(dc->ss_active)) {
129
+ tcg_insn_unit insn;
121
/* Unconditional and "condition passed" instruction codepath. */
130
+
122
switch (dc->base.is_jmp) {
131
+ /* Either directly branch, or load slot address for indirect branch. */
123
case DISAS_SWI:
132
+ if (d_disp == sextreg(d_disp, 0, 26)) {
124
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
133
+ insn = encode_sd10k16_insn(OPC_B, d_disp);
125
/* "Condition failed" instruction codepath for the branch/trap insn */
134
+ } else {
126
gen_set_label(dc->condlabel);
135
+ uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
127
gen_set_condexec(dc);
136
+ intptr_t i_disp = i_addr - jmp_rx;
128
- if (unlikely(is_singlestepping(dc))) {
137
+ insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
129
+ if (unlikely(dc->ss_active)) {
138
+ }
130
gen_set_pc_im(dc, dc->base.pc_next);
139
+
131
gen_singlestep_exception(dc);
140
+ qatomic_set((tcg_insn_unit *)jmp_rw, insn);
132
} else {
141
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
142
+}
143
+
144
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
145
const TCGArg args[TCG_MAX_OP_ARGS],
146
const int const_args[TCG_MAX_OP_ARGS])
133
--
147
--
134
2.25.1
148
2.34.1
135
136
Deleted patch
1
We were using singlestep_enabled as a proxy for whether
2
translator_use_goto_tb would always return false.
3
1
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/i386/tcg/translate.c | 5 +++--
7
1 file changed, 3 insertions(+), 2 deletions(-)
8
9
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/i386/tcg/translate.c
12
+++ b/target/i386/tcg/translate.c
13
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
14
DisasContext *dc = container_of(dcbase, DisasContext, base);
15
CPUX86State *env = cpu->env_ptr;
16
uint32_t flags = dc->base.tb->flags;
17
+ uint32_t cflags = tb_cflags(dc->base.tb);
18
int cpl = (flags >> HF_CPL_SHIFT) & 3;
19
int iopl = (flags >> IOPL_SHIFT) & 3;
20
21
@@ -XXX,XX +XXX,XX @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
22
dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
23
dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
24
dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
25
- dc->jmp_opt = !(dc->base.singlestep_enabled ||
26
+ dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
27
(flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
28
/*
29
* If jmp_opt, we want to handle each string instruction individually.
30
* For icount also disable repz optimization so that each iteration
31
* is accounted separately.
32
*/
33
- dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT);
34
+ dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);
35
36
dc->T0 = tcg_temp_new();
37
dc->T1 = tcg_temp_new();
38
--
39
2.25.1
40
41
Deleted patch
1
GDB single-stepping is now handled generically.
2
1
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/i386/helper.h | 1 -
6
target/i386/tcg/misc_helper.c | 8 --------
7
target/i386/tcg/translate.c | 4 +---
8
3 files changed, 1 insertion(+), 12 deletions(-)
9
10
diff --git a/target/i386/helper.h b/target/i386/helper.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/i386/helper.h
13
+++ b/target/i386/helper.h
14
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_2(syscall, void, env, int)
15
DEF_HELPER_2(sysret, void, env, int)
16
#endif
17
DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int)
18
-DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env)
19
DEF_HELPER_1(reset_rf, void, env)
20
DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int)
21
DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int)
22
diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/i386/tcg/misc_helper.c
25
+++ b/target/i386/tcg/misc_helper.c
26
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend)
27
do_pause(env);
28
}
29
30
-void QEMU_NORETURN helper_debug(CPUX86State *env)
31
-{
32
- CPUState *cs = env_cpu(env);
33
-
34
- cs->exception_index = EXCP_DEBUG;
35
- cpu_loop_exit(cs);
36
-}
37
-
38
uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
39
{
40
if ((env->cr[4] & CR4_PKE_MASK) == 0) {
41
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/i386/tcg/translate.c
44
+++ b/target/i386/tcg/translate.c
45
@@ -XXX,XX +XXX,XX @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
46
if (s->base.tb->flags & HF_RF_MASK) {
47
gen_helper_reset_rf(cpu_env);
48
}
49
- if (s->base.singlestep_enabled) {
50
- gen_helper_debug(cpu_env);
51
- } else if (recheck_tf) {
52
+ if (recheck_tf) {
53
gen_helper_rechecking_single_step(cpu_env);
54
tcg_gen_exit_tb(NULL, 0);
55
} else if (s->flags & HF_TF_MASK) {
56
--
57
2.25.1
58
59
Deleted patch
1
We were using singlestep_enabled as a proxy for whether
2
translator_use_goto_tb would always return false.
3
1
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/microblaze/translate.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/microblaze/translate.c
12
+++ b/target/microblaze/translate.c
13
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
14
break;
15
16
case DISAS_JUMP:
17
- if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
18
+ if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) {
19
/* Direct jump. */
20
tcg_gen_discard_i32(cpu_btarget);
21
22
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
23
return;
24
}
25
26
- /* Indirect jump (or direct jump w/ singlestep) */
27
+ /* Indirect jump (or direct jump w/ goto_tb disabled) */
28
tcg_gen_mov_i32(cpu_pc, cpu_btarget);
29
tcg_gen_discard_i32(cpu_btarget);
30
31
--
32
2.25.1
33
34
Deleted patch
1
GDB single-stepping is now handled generically.
2
1
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
target/microblaze/translate.c | 14 ++------------
6
1 file changed, 2 insertions(+), 12 deletions(-)
7
8
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/target/microblaze/translate.c
11
+++ b/target/microblaze/translate.c
12
@@ -XXX,XX +XXX,XX @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
13
14
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
15
{
16
- if (dc->base.singlestep_enabled) {
17
- TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
18
- tcg_gen_movi_i32(cpu_pc, dest);
19
- gen_helper_raise_exception(cpu_env, tmp);
20
- tcg_temp_free_i32(tmp);
21
- } else if (translator_use_goto_tb(&dc->base, dest)) {
22
+ if (translator_use_goto_tb(&dc->base, dest)) {
23
tcg_gen_goto_tb(n);
24
tcg_gen_movi_i32(cpu_pc, dest);
25
tcg_gen_exit_tb(dc->base.tb, n);
26
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
27
/* Indirect jump (or direct jump w/ goto_tb disabled) */
28
tcg_gen_mov_i32(cpu_pc, cpu_btarget);
29
tcg_gen_discard_i32(cpu_btarget);
30
-
31
- if (unlikely(cs->singlestep_enabled)) {
32
- gen_raise_exception(dc, EXCP_DEBUG);
33
- } else {
34
- tcg_gen_lookup_and_goto_ptr();
35
- }
36
+ tcg_gen_lookup_and_goto_ptr();
37
return;
38
39
default:
40
--
41
2.25.1
42
43