TCG patch queue, plus one target/sh4 patch that
Yoshinori Sato asked me to process.

r~

The following changes since commit efbf38d73e5dcc4d5f8b98c6e7a12be1f3b91745:

  Merge tag 'for-upstream' of git://repo.or.cz/qemu/kevin into staging (2022-10-03 15:06:07 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20221004

for you to fetch changes up to ab419fd8a035a65942de4e63effcd55ccbf1a9fe:

  target/sh4: Fix TB_FLAG_UNALIGN (2022-10-04 12:33:05 -0700)

----------------------------------------------------------------
Cache CPUClass for use in hot code paths.
Add CPUTLBEntryFull, probe_access_full, tlb_set_page_full.
Add generic support for TARGET_TB_PCREL.
tcg/ppc: Optimize 26-bit jumps using STQ for POWER 2.07
target/sh4: Fix TB_FLAG_UNALIGN

----------------------------------------------------------------
Alex Bennée (3):
      cpu: cache CPUClass in CPUState for hot code paths
      hw/core/cpu-sysemu: used cached class in cpu_asidx_from_attrs
      cputlb: used cached CPUClass in our hot-paths

Leandro Lupori (1):
      tcg/ppc: Optimize 26-bit jumps

Richard Henderson (16):
      accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
      accel/tcg: Drop addr member from SavedIOTLB
      accel/tcg: Suppress auto-invalidate in probe_access_internal
      accel/tcg: Introduce probe_access_full
      accel/tcg: Introduce tlb_set_page_full
      include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
      accel/tcg: Remove PageDesc code_bitmap
      accel/tcg: Use bool for page_find_alloc
      accel/tcg: Use DisasContextBase in plugin_gen_tb_start
      accel/tcg: Do not align tb->page_addr[0]
      accel/tcg: Inline tb_flush_jmp_cache
      include/hw/core: Create struct CPUJumpCache
      hw/core: Add CPUClass.get_pc
      accel/tcg: Introduce tb_pc and log_pc
      accel/tcg: Introduce TARGET_TB_PCREL
      target/sh4: Fix TB_FLAG_UNALIGN

 accel/tcg/internal.h                    |  10 ++
 accel/tcg/tb-hash.h                     |   1 +
 accel/tcg/tb-jmp-cache.h                |  65 ++++++++
 include/exec/cpu-common.h               |   1 +
 include/exec/cpu-defs.h                 |  48 ++++--
 include/exec/exec-all.h                 |  75 ++++++++-
 include/exec/plugin-gen.h               |   7 +-
 include/hw/core/cpu.h                   |  28 ++--
 include/qemu/typedefs.h                 |   2 +
 include/tcg/tcg.h                       |   2 +-
 target/sh4/cpu.h                        |  56 ++++---
 accel/stubs/tcg-stub.c                  |   4 +
 accel/tcg/cpu-exec.c                    |  80 +++++-----
 accel/tcg/cputlb.c                      | 259 ++++++++++++++++++--------------
 accel/tcg/plugin-gen.c                  |  22 +--
 accel/tcg/translate-all.c               | 214 ++++++++++++--------------
 accel/tcg/translator.c                  |   2 +-
 cpu.c                                   |   9 +-
 hw/core/cpu-common.c                    |   3 +-
 hw/core/cpu-sysemu.c                    |   5 +-
 linux-user/sh4/signal.c                 |   6 +-
 plugins/core.c                          |   2 +-
 target/alpha/cpu.c                      |   9 ++
 target/arm/cpu.c                        |  17 ++-
 target/arm/mte_helper.c                 |  14 +-
 target/arm/sve_helper.c                 |   4 +-
 target/arm/translate-a64.c              |   2 +-
 target/avr/cpu.c                        |  10 +-
 target/cris/cpu.c                       |   8 +
 target/hexagon/cpu.c                    |  10 +-
 target/hppa/cpu.c                       |  12 +-
 target/i386/cpu.c                       |   9 ++
 target/i386/tcg/tcg-cpu.c               |   2 +-
 target/loongarch/cpu.c                  |  11 +-
 target/m68k/cpu.c                       |   8 +
 target/microblaze/cpu.c                 |  10 +-
 target/mips/cpu.c                       |   8 +
 target/mips/tcg/exception.c             |   2 +-
 target/mips/tcg/sysemu/special_helper.c |   2 +-
 target/nios2/cpu.c                      |   9 ++
 target/openrisc/cpu.c                   |  10 +-
 target/ppc/cpu_init.c                   |   8 +
 target/riscv/cpu.c                      |  17 ++-
 target/rx/cpu.c                         |  10 +-
 target/s390x/cpu.c                      |   8 +
 target/s390x/tcg/mem_helper.c           |   4 -
 target/sh4/cpu.c                        |  18 ++-
 target/sh4/helper.c                     |   6 +-
 target/sh4/translate.c                  |  90 +++++------
 target/sparc/cpu.c                      |  10 +-
 target/tricore/cpu.c                    |  11 +-
 target/xtensa/cpu.c                     |   8 +
 tcg/tcg.c                               |   8 +-
 trace/control-target.c                  |   2 +-
 tcg/ppc/tcg-target.c.inc                | 119 +++++++++----
 55 files changed, 915 insertions(+), 462 deletions(-)
 create mode 100644 accel/tcg/tb-jmp-cache.h

The following changes since commit a9fe9e191b4305b88c356a1ed9ac3baf89eb18aa:

  Merge tag 'pull-riscv-to-apply-20230505-1' of https://github.com/alistair23/qemu into staging (2023-05-05 09:25:13 +0100)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230505

for you to fetch changes up to 35a0bd63b458f30389b6bc6b7471c1665fe7b9d8:

  tcg: Widen helper_*_st[bw]_mmu val arguments (2023-05-05 17:21:03 +0100)

----------------------------------------------------------------
softfloat: Fix the incorrect computation in float32_exp2
tcg: Remove compatability helpers for qemu ld/st
target/alpha: Remove TARGET_ALIGNED_ONLY
target/hppa: Remove TARGET_ALIGNED_ONLY
target/sparc: Remove TARGET_ALIGNED_ONLY
tcg: Cleanups preparing to unify calls to qemu_ld/st helpers

----------------------------------------------------------------
Richard Henderson (41):
      target/avr: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/cris: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/Hexagon: Finish conversion to tcg_gen_qemu_{ld, st}_*
      target/m68k: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/mips: Finish conversion to tcg_gen_qemu_{ld,st}_*
      target/s390x: Finish conversion to tcg_gen_qemu_{ld, st}_*
      target/sparc: Finish conversion to tcg_gen_qemu_{ld, st}_*
      target/xtensa: Finish conversion to tcg_gen_qemu_{ld, st}_*
      tcg: Remove compatability helpers for qemu ld/st
      target/alpha: Use MO_ALIGN for system UNALIGN()
      target/alpha: Use MO_ALIGN where required
      target/alpha: Remove TARGET_ALIGNED_ONLY
      target/hppa: Use MO_ALIGN for system UNALIGN()
      target/hppa: Remove TARGET_ALIGNED_ONLY
      target/sparc: Use MO_ALIGN where required
      target/sparc: Use cpu_ld*_code_mmu
      target/sparc: Remove TARGET_ALIGNED_ONLY
      tcg/i386: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/i386: Generalize multi-part load overlap test
      tcg/i386: Introduce HostAddress
      tcg/i386: Drop r0+r1 local variables from tcg_out_tlb_load
      tcg/i386: Introduce tcg_out_testi
      tcg/aarch64: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/aarch64: Introduce HostAddress
      tcg/arm: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/arm: Introduce HostAddress
      tcg/loongarch64: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/loongarch64: Introduce HostAddress
      tcg/mips: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/ppc: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/ppc: Introduce HostAddress
      tcg/riscv: Require TCG_TARGET_REG_BITS == 64
      tcg/riscv: Rationalize args to tcg_out_qemu_{ld,st}
      tcg/s390x: Pass TCGType to tcg_out_qemu_{ld,st}
      tcg/s390x: Introduce HostAddress
      tcg/sparc64: Drop is_64 test from tcg_out_qemu_ld data return
      tcg/sparc64: Pass TCGType to tcg_out_qemu_{ld,st}
      tcg: Move TCGLabelQemuLdst to tcg.c
      tcg: Replace REG_P with arg_loc_reg_p
      tcg: Introduce arg_slot_stk_ofs
      tcg: Widen helper_*_st[bw]_mmu val arguments

Shivaprasad G Bhat (1):
      softfloat: Fix the incorrect computation in float32_exp2

 configs/targets/alpha-linux-user.mak        |   1 -
 configs/targets/alpha-softmmu.mak           |   1 -
 configs/targets/hppa-linux-user.mak         |   1 -
 configs/targets/hppa-softmmu.mak            |   1 -
 configs/targets/sparc-linux-user.mak        |   1 -
 configs/targets/sparc-softmmu.mak           |   1 -
 configs/targets/sparc32plus-linux-user.mak  |   1 -
 configs/targets/sparc64-linux-user.mak      |   1 -
 configs/targets/sparc64-softmmu.mak         |   1 -
 include/tcg/tcg-ldst.h                      |  10 +-
 include/tcg/tcg-op.h                        |  55 -----
 target/hexagon/macros.h                     |  14 +-
 tcg/riscv/tcg-target-con-set.h              |   8 -
 tcg/riscv/tcg-target.h                      |  22 +-
 tcg/tcg-internal.h                          |   4 -
 accel/tcg/cputlb.c                          |   6 +-
 fpu/softfloat.c                             |   2 +-
 target/alpha/translate.c                    |  38 +--
 target/avr/translate.c                      |  16 +-
 target/hexagon/genptr.c                     |   8 +-
 target/hexagon/idef-parser/parser-helpers.c |  28 +--
 target/hexagon/translate.c                  |  32 +--
 target/hppa/translate.c                     |   2 +-
 target/m68k/translate.c                     |  76 ++----
 target/mips/tcg/translate.c                 |   8 +-
 target/s390x/tcg/translate.c                | 152 ++++++------
 target/sparc/ldst_helper.c                  |  10 +-
 target/sparc/translate.c                    |  85 ++++---
 target/xtensa/translate.c                   |   4 +-
 tcg/tcg.c                                   |  58 +++--
 target/cris/translate_v10.c.inc             |  18 +-
 target/mips/tcg/nanomips_translate.c.inc    |   2 +-
 tcg/aarch64/tcg-target.c.inc                | 108 ++++++---
 tcg/arm/tcg-target.c.inc                    | 357 +++++++++++++---------------
 tcg/i386/tcg-target.c.inc                   | 345 ++++++++++++++-------------
 tcg/loongarch64/tcg-target.c.inc            | 135 +++++------
 tcg/mips/tcg-target.c.inc                   | 186 ++++++++-------
 tcg/ppc/tcg-target.c.inc                    | 192 ++++++++-------
 tcg/riscv/tcg-target.c.inc                  | 268 ++++++---------------
 tcg/s390x/tcg-target.c.inc                  | 131 +++++-----
 tcg/sparc64/tcg-target.c.inc                |   8 +-
 tcg/tcg-ldst.c.inc                          |  14 --
 42 files changed, 1120 insertions(+), 1291 deletions(-)

From: Shivaprasad G Bhat <sbhat@linux.ibm.com>

The float32_exp2 function is computing the wrong exponent of 2.

For example, with the following set of values {0.1, 2.0, 2.0, -1.0},
the expected output would be {1.071773, 4.000000, 4.000000, 0.500000}.
Instead, the function computes {1.119102, 3.382044, 3.382044, -0.191022}.

Looking at the code, float32_exp2() attempts to do this

                    2     3     4     5           n
  x        x       x     x     x     x           x
 e  = 1 + --- +   --- + --- + --- + --- + ... + --- + ...
           1!      2!    3!    4!    5!          n!

But because of the typo it ends up doing

  x        x       x     x     x     x           x
 e  = 1 + --- +   --- + --- + --- + --- + ... + --- + ...
           1!      2!    3!    4!    5!          n!

This is because instead of the xnp, which holds the numerator, parts_muladd
is using the xp, which is just 'x'. Commit '572c4d862ff2' refactored this
function, and mistakenly used xp instead of xnp.
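
The effect is easy to reproduce outside of softfloat. The following is a
standalone sketch of the same loop using plain doubles instead of the
softfloat parts_muladd()/parts_mul() calls, with the coefficients assumed
to be 1/(n+1)! as in float32_exp2_coefficients[]; the "buggy" variant
reproduces the values quoted above:

    #include <stdio.h>
    #include <math.h>

    /* Plain-double model of the series in float32_exp2(). */
    static double exp2_series(double a, int buggy)
    {
        double xp = a * M_LN2;  /* 2^a == e^(a * ln2) */
        double xnp = xp;        /* x^(n+1), the running numerator */
        double rp = 1.0;        /* accumulator, starts at 1 */
        double fact = 1.0;

        for (int i = 0; i < 15; i++) {
            fact *= i + 1;                    /* (i+1)! */
            rp += (buggy ? xp : xnp) / fact;  /* the typo used xp here */
            xnp *= xp;                        /* advance the power of x */
        }
        return rp;
    }

    int main(void)
    {
        /* prints 4.000000/3.382044 and 0.500000/-0.191022 */
        printf("2^2.0:  fixed %f, buggy %f\n",
               exp2_series(2.0, 0), exp2_series(2.0, 1));
        printf("2^-1.0: fixed %f, buggy %f\n",
               exp2_series(-1.0, 0), exp2_series(-1.0, 1));
        return 0;
    }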

Cc: qemu-stable@nongnu.org
Fixes: 572c4d862ff2 "softfloat: Convert float32_exp2 to FloatParts"
Partially-Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1623
Reported-By: Luca Barbato (https://gitlab.com/lu-zero)
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Message-Id: <168304110865.537992.13059030916325018670.stgit@localhost.localdomain>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 fpu/softfloat.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
     float64_unpack_canonical(&rp, float64_one, status);
     for (i = 0 ; i < 15 ; i++) {
         float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
-        rp = *parts_muladd(&tp, &xp, &rp, 0, status);
+        rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
         xnp = *parts_mul(&xnp, &xp, status);
     }
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument.
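
For readers unfamiliar with the two interfaces, the conversion applied
throughout this series is mechanical. The mapping below is a quick
reference assembled from the hunks in this series (comment form only,
not an exhaustive list of the removed helpers; MO_TE* are the
target-endian variants):

    /*
     * Old implicit-MemOp helper          New explicit-MemOp call
     * -----------------------------      -----------------------------------------
     * tcg_gen_qemu_ld8u(v, a, idx)   ->  tcg_gen_qemu_ld_tl(v, a, idx, MO_UB)
     * tcg_gen_qemu_ld8s(v, a, idx)   ->  tcg_gen_qemu_ld_tl(v, a, idx, MO_SB)
     * tcg_gen_qemu_ld16u(v, a, idx)  ->  tcg_gen_qemu_ld_tl(v, a, idx, MO_TEUW)
     * tcg_gen_qemu_ld16s(v, a, idx)  ->  tcg_gen_qemu_ld_tl(v, a, idx, MO_TESW)
     * tcg_gen_qemu_ld32u(v, a, idx)  ->  tcg_gen_qemu_ld_tl(v, a, idx, MO_TEUL)
     * tcg_gen_qemu_ld64(v, a, idx)   ->  tcg_gen_qemu_ld_i64(v, a, idx, MO_TEUQ)
     * tcg_gen_qemu_st8(v, a, idx)    ->  tcg_gen_qemu_st_tl(v, a, idx, MO_UB)
     * tcg_gen_qemu_st16(v, a, idx)   ->  tcg_gen_qemu_st_tl(v, a, idx, MO_TEUW)
     * tcg_gen_qemu_st32(v, a, idx)   ->  tcg_gen_qemu_st_tl(v, a, idx, MO_TEUL)
     * tcg_gen_qemu_st64(v, a, idx)   ->  tcg_gen_qemu_st_i64(v, a, idx, MO_TEUQ)
     */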

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-2-richard.henderson@linaro.org>
---
 target/avr/translate.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_data_store(DisasContext *ctx, TCGv data, TCGv addr)
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
         gen_helper_fullwr(cpu_env, data, addr);
     } else {
-        tcg_gen_qemu_st8(data, addr, MMU_DATA_IDX); /* mem[addr] = data */
+        tcg_gen_qemu_st_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void gen_data_load(DisasContext *ctx, TCGv data, TCGv addr)
     if (ctx->base.tb->flags & TB_FLAGS_FULL_ACCESS) {
         gen_helper_fullrd(data, cpu_env, addr);
     } else {
-        tcg_gen_qemu_ld8u(data, addr, MMU_DATA_IDX); /* data = mem[addr] */
+        tcg_gen_qemu_ld_tl(data, addr, MMU_DATA_IDX, MO_UB);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LPM1(DisasContext *ctx, arg_LPM1 *a)
 
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LPM2(DisasContext *ctx, arg_LPM2 *a)
 
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_LPMX(DisasContext *ctx, arg_LPMX *a)
 
     tcg_gen_shli_tl(addr, H, 8); /* addr = H:L */
     tcg_gen_or_tl(addr, addr, L);
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
     tcg_gen_andi_tl(L, addr, 0xff);
     tcg_gen_shri_tl(addr, addr, 8);
@@ -XXX,XX +XXX,XX @@ static bool trans_ELPM1(DisasContext *ctx, arg_ELPM1 *a)
     TCGv Rd = cpu_r[0];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_ELPM2(DisasContext *ctx, arg_ELPM2 *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool trans_ELPMX(DisasContext *ctx, arg_ELPMX *a)
     TCGv Rd = cpu_r[a->rd];
     TCGv addr = gen_get_zaddr();
 
-    tcg_gen_qemu_ld8u(Rd, addr, MMU_CODE_IDX); /* Rd = mem[addr] */
+    tcg_gen_qemu_ld_tl(Rd, addr, MMU_CODE_IDX, MO_UB);
     tcg_gen_addi_tl(addr, addr, 1); /* addr = addr + 1 */
     gen_set_zaddr(addr);
     return true;
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument. In this case we can fold the calls
using the size bits of MemOp.
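
The fold works because the MemOp size field is the log2 of the access
size in bytes, so ctz32() of the byte count yields it directly. A
standalone sanity check, using a stand-in for the ctz32() from
qemu/host-utils.h:

    #include <stdio.h>

    /* stand-in for qemu/host-utils.h ctz32() */
    static int ctz32(unsigned val) { return __builtin_ctz(val); }

    int main(void)
    {
        /* MemOp size encoding: MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3 */
        for (unsigned size = 1; size <= 8; size <<= 1) {
            printf("%u-byte access -> MemOp size %d (MO_%u)\n",
                   size, ctz32(size), size * 8);
        }
        return 0;
    }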

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-3-richard.henderson@linaro.org>
---
 target/cris/translate_v10.c.inc | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate_v10.c.inc
+++ b/target/cris/translate_v10.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
     /* Store only if F flag isn't set */
     tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
     tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
-    if (size == 1) {
-        tcg_gen_qemu_st8(tval, taddr, mem_index);
-    } else if (size == 2) {
-        tcg_gen_qemu_st16(tval, taddr, mem_index);
-    } else {
-        tcg_gen_qemu_st32(tval, taddr, mem_index);
-    }
+
+    tcg_gen_qemu_st_tl(tval, taddr, mem_index, ctz32(size) | MO_TE);
+
     gen_set_label(l1);
     tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */
     tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
@@ -XXX,XX +XXX,XX @@ static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
         return;
     }
 
-    if (size == 1) {
-        tcg_gen_qemu_st8(val, addr, mem_index);
-    } else if (size == 2) {
-        tcg_gen_qemu_st16(val, addr, mem_index);
-    } else {
-        tcg_gen_qemu_st32(val, addr, mem_index);
-    }
+    tcg_gen_qemu_st_tl(val, addr, mem_index, ctz32(size) | MO_TE);
 }
 
 
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument. Importantly, this removes some incorrect
casts generated by idef-parser's gen_load().
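
As an illustration, for a 2-byte signed load into a 32-bit destination,
the generated code changes roughly as follows (a hypothetical example
reconstructed by hand from the OUT() calls in the gen_load() hunk below,
not taken from real idef-parser output):

    /* old: size/sign suffix, with a cast when dst is wider than the load */
    tcg_gen_qemu_ld16s((TCGv) dst, ea, ctx->mem_idx);
    /* new: explicit MemOp, no cast required */
    tcg_gen_qemu_ld_i32(dst, ea, ctx->mem_idx, MO_16 | MO_SIGN | MO_TE);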

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-4-richard.henderson@linaro.org>
---
 target/hexagon/macros.h                     | 14 ++++-----
 target/hexagon/genptr.c                     |  8 +++---
 target/hexagon/idef-parser/parser-helpers.c | 28 +++++++++---------
 target/hexagon/translate.c                  | 32 ++++++++++-----------
 4 files changed, 40 insertions(+), 42 deletions(-)

diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -XXX,XX +XXX,XX @@
 #define MEM_LOAD1s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 1); \
-        tcg_gen_qemu_ld8s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_SB); \
     } while (0)
 #define MEM_LOAD1u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 1); \
-        tcg_gen_qemu_ld8u(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_UB); \
     } while (0)
 #define MEM_LOAD2s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 2); \
-        tcg_gen_qemu_ld16s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESW); \
     } while (0)
 #define MEM_LOAD2u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 2); \
-        tcg_gen_qemu_ld16u(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUW); \
     } while (0)
 #define MEM_LOAD4s(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 4); \
-        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TESL); \
     } while (0)
 #define MEM_LOAD4u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 4); \
-        tcg_gen_qemu_ld32s(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_tl(DST, VA, ctx->mem_idx, MO_TEUL); \
     } while (0)
 #define MEM_LOAD8u(DST, VA) \
     do { \
         CHECK_NOSHUF(VA, 8); \
-        tcg_gen_qemu_ld64(DST, VA, ctx->mem_idx); \
+        tcg_gen_qemu_ld_i64(DST, VA, ctx->mem_idx, MO_TEUQ); \
     } while (0)
 
 #define MEM_STORE1_FUNC(X) \
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/genptr.c
+++ b/target/hexagon/genptr.c
@@ -XXX,XX +XXX,XX @@ void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
 
 static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
 {
-    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
+    tcg_gen_qemu_ld_tl(dest, vaddr, mem_index, MO_TEUL);
     tcg_gen_mov_tl(hex_llsc_addr, vaddr);
     tcg_gen_mov_tl(hex_llsc_val, dest);
 }
 
 static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
 {
-    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
+    tcg_gen_qemu_ld_i64(dest, vaddr, mem_index, MO_TEUQ);
     tcg_gen_mov_tl(hex_llsc_addr, vaddr);
     tcg_gen_mov_i64(hex_llsc_val_i64, dest);
 }
@@ -XXX,XX +XXX,XX @@ static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
 {
     Insn *insn = ctx->insn; /* Needed for CHECK_NOSHUF */
     CHECK_NOSHUF(EA, 8);
-    tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx);
+    tcg_gen_qemu_ld_i64(frame, EA, ctx->mem_idx, MO_TEUQ);
 }
 
 static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
@@ -XXX,XX +XXX,XX @@ static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
         tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
     }
     for (int i = 0; i < sizeof(MMVector) / 8; i++) {
-        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
+        tcg_gen_qemu_ld_i64(tmp, src, ctx->mem_idx, MO_TEUQ);
         tcg_gen_addi_tl(src, src, 8);
         tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
     }
diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/idef-parser/parser-helpers.c
+++ b/target/hexagon/idef-parser/parser-helpers.c
@@ -XXX,XX +XXX,XX @@ void gen_load_cancel(Context *c, YYLTYPE *locp)
 void gen_load(Context *c, YYLTYPE *locp, HexValue *width,
               HexSignedness signedness, HexValue *ea, HexValue *dst)
 {
-    char size_suffix[4] = {0};
-    const char *sign_suffix;
+    unsigned dst_bit_width;
+    unsigned src_bit_width;
+
     /* Memop width is specified in the load macro */
     assert_signedness(c, locp, signedness);
-    sign_suffix = (width->imm.value > 4)
-                  ? ""
-                  : ((signedness == UNSIGNED) ? "u" : "s");
+
     /* If dst is a variable, assert that is declared and load the type info */
     if (dst->type == VARID) {
         find_variable(c, locp, dst, dst);
     }
 
-    snprintf(size_suffix, 4, "%" PRIu64, width->imm.value * 8);
+    src_bit_width = width->imm.value * 8;
+    dst_bit_width = MAX(dst->bit_width, 32);
+
     /* Lookup the effective address EA */
     find_variable(c, locp, ea, ea);
     OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
     OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n");
     OUT(c, locp, "process_store(ctx, 1);\n");
     OUT(c, locp, "}\n");
-    OUT(c, locp, "tcg_gen_qemu_ld", size_suffix, sign_suffix);
+
+    OUT(c, locp, "tcg_gen_qemu_ld_i", &dst_bit_width);
     OUT(c, locp, "(");
-    if (dst->bit_width > width->imm.value * 8) {
-        /*
-         * Cast to the correct TCG type if necessary, to avoid implict cast
-         * warnings. This is needed when the width of the destination var is
-         * larger than the size of the requested load.
-         */
-        OUT(c, locp, "(TCGv) ");
+    OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx, MO_", &src_bit_width);
+    if (signedness == SIGNED) {
+        OUT(c, locp, " | MO_SIGN");
     }
-    OUT(c, locp, dst, ", ", ea, ", ctx->mem_idx);\n");
+    OUT(c, locp, " | MO_TE);\n");
 }
 
 void gen_store(Context *c, YYLTYPE *locp, HexValue *width, HexValue *ea,
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ void process_store(DisasContext *ctx, int slot_num)
     switch (ctx->store_width[slot_num]) {
     case 1:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st8(hex_store_val32[slot_num],
-                         hex_store_addr[slot_num],
-                         ctx->mem_idx);
+        tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                           hex_store_addr[slot_num],
+                           ctx->mem_idx, MO_UB);
         break;
     case 2:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st16(hex_store_val32[slot_num],
-                          hex_store_addr[slot_num],
-                          ctx->mem_idx);
+        tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                           hex_store_addr[slot_num],
+                           ctx->mem_idx, MO_TEUW);
         break;
     case 4:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st32(hex_store_val32[slot_num],
-                          hex_store_addr[slot_num],
-                          ctx->mem_idx);
+        tcg_gen_qemu_st_tl(hex_store_val32[slot_num],
+                           hex_store_addr[slot_num],
+                           ctx->mem_idx, MO_TEUL);
         break;
     case 8:
         gen_check_store_width(ctx, slot_num);
-        tcg_gen_qemu_st64(hex_store_val64[slot_num],
-                          hex_store_addr[slot_num],
-                          ctx->mem_idx);
+        tcg_gen_qemu_st_i64(hex_store_val64[slot_num],
+                            hex_store_addr[slot_num],
+                            ctx->mem_idx, MO_TEUQ);
         break;
     default:
         {
@@ -XXX,XX +XXX,XX @@ static void process_dczeroa(DisasContext *ctx)
         TCGv_i64 zero = tcg_constant_i64(0);
 
         tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
         tcg_gen_addi_tl(addr, addr, 8);
-        tcg_gen_qemu_st64(zero, addr, ctx->mem_idx);
+        tcg_gen_qemu_st_i64(zero, addr, ctx->mem_idx, MO_UQ);
     }
 }
 
--
2.34.1

The value previously chosen overlaps GUSA_MASK.

Rename all DELAY_SLOT_* and GUSA_* defines to emphasize
that they are included in TB_FLAGs. Add aliases for the
FPSCR and SR bits that are included in TB_FLAGS, so that
we don't accidentally reassign those bits.
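
For orientation, the resulting bit layout works out as follows (a
comment-only sketch assembled from the new defines in the cpu.h hunk
below; not part of the original patch):

    /*
     * sh4 TB_FLAGS layout after this patch:
     *
     *   bit  0      TB_FLAG_DELAY_SLOT
     *   bit  1      TB_FLAG_DELAY_SLOT_COND
     *   bit  2      TB_FLAG_DELAY_SLOT_RTE
     *   bit  3      TB_FLAG_PENDING_MOVCA
     *   bits 4-11   gUSA region backup (TB_FLAG_GUSA_SHIFT)
     *   bit  12     TB_FLAG_GUSA_EXCLUSIVE
     *   bit  13     TB_FLAG_UNALIGN (was bit 4, inside the gUSA field)
     *   bit  15     TB_FLAG_SR_FD
     *   bits 19-21  TB_FLAG_FPSCR_PR / _SZ / _FR
     *   bits 29-30  TB_FLAG_SR_RB / TB_FLAG_SR_MD
     */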

Fixes: 4da06fb3062 ("target/sh4: Implement prctl_unalign_sigbus")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/856
Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sh4/cpu.h        | 56 +++++++++++++------------
 linux-user/sh4/signal.c |  6 +--
 target/sh4/cpu.c        |  6 +--
 target/sh4/helper.c     |  6 +--
 target/sh4/translate.c  | 90 ++++++++++++++++++++++-------------------
 5 files changed, 88 insertions(+), 76 deletions(-)

diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@
 #define FPSCR_RM_NEAREST (0 << 0)
 #define FPSCR_RM_ZERO (1 << 0)
 
-#define DELAY_SLOT_MASK 0x7
-#define DELAY_SLOT (1 << 0)
-#define DELAY_SLOT_CONDITIONAL (1 << 1)
-#define DELAY_SLOT_RTE (1 << 2)
+#define TB_FLAG_DELAY_SLOT       (1 << 0)
+#define TB_FLAG_DELAY_SLOT_COND  (1 << 1)
+#define TB_FLAG_DELAY_SLOT_RTE   (1 << 2)
+#define TB_FLAG_PENDING_MOVCA    (1 << 3)
+#define TB_FLAG_GUSA_SHIFT       4  /* [11:4] */
+#define TB_FLAG_GUSA_EXCLUSIVE   (1 << 12)
+#define TB_FLAG_UNALIGN          (1 << 13)
+#define TB_FLAG_SR_FD            (1 << SR_FD)  /* 15 */
+#define TB_FLAG_FPSCR_PR         FPSCR_PR      /* 19 */
+#define TB_FLAG_FPSCR_SZ         FPSCR_SZ      /* 20 */
+#define TB_FLAG_FPSCR_FR         FPSCR_FR      /* 21 */
+#define TB_FLAG_SR_RB            (1 << SR_RB)  /* 29 */
+#define TB_FLAG_SR_MD            (1 << SR_MD)  /* 30 */
 
-#define TB_FLAG_PENDING_MOVCA (1 << 3)
-#define TB_FLAG_UNALIGN (1 << 4)
-
-#define GUSA_SHIFT 4
-#ifdef CONFIG_USER_ONLY
-#define GUSA_EXCLUSIVE (1 << 12)
-#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE)
-#else
-/* Provide dummy versions of the above to allow tests against tbflags
-   to be elided while avoiding ifdefs. */
-#define GUSA_EXCLUSIVE 0
-#define GUSA_MASK 0
-#endif
-
-#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK)
+#define TB_FLAG_DELAY_SLOT_MASK  (TB_FLAG_DELAY_SLOT | \
+                                  TB_FLAG_DELAY_SLOT_COND | \
+                                  TB_FLAG_DELAY_SLOT_RTE)
+#define TB_FLAG_GUSA_MASK        ((0xff << TB_FLAG_GUSA_SHIFT) | \
+                                  TB_FLAG_GUSA_EXCLUSIVE)
+#define TB_FLAG_FPSCR_MASK       (TB_FLAG_FPSCR_PR | \
+                                  TB_FLAG_FPSCR_SZ | \
+                                  TB_FLAG_FPSCR_FR)
+#define TB_FLAG_SR_MASK          (TB_FLAG_SR_FD | \
+                                  TB_FLAG_SR_RB | \
+                                  TB_FLAG_SR_MD)
+#define TB_FLAG_ENVFLAGS_MASK    (TB_FLAG_DELAY_SLOT_MASK | \
+                                  TB_FLAG_GUSA_MASK)
 
 typedef struct tlb_t {
     uint32_t vpn;        /* virtual page number */
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
 {
     /* The instruction in a RTE delay slot is fetched in privileged
        mode, but executed in user mode. */
-    if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
+    if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) {
         return 0;
     } else {
         return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
 {
     *pc = env->pc;
     /* For a gUSA region, notice the end of the region. */
-    *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0;
-    *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */
-            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
-            | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
-            | (env->sr & (1u << SR_FD)) /* Bit 15 */
+    *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0;
+    *flags = env->flags
+            | (env->fpscr & TB_FLAG_FPSCR_MASK)
+            | (env->sr & TB_FLAG_SR_MASK)
             | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
 #ifdef CONFIG_USER_ONLY
     *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/sh4/signal.c
+++ b/linux-user/sh4/signal.c
@@ -XXX,XX +XXX,XX @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
     __get_user(regs->fpul, &sc->sc_fpul);
 
     regs->tra = -1; /* disable syscall checks */
-    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
+    regs->flags = 0;
 }
 
 void setup_frame(int sig, struct target_sigaction *ka,
@@ -XXX,XX +XXX,XX @@ void setup_frame(int sig, struct target_sigaction *ka,
     regs->gregs[5] = 0;
     regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
     regs->pc = (unsigned long) ka->_sa_handler;
-    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
+    regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
 
     unlock_user_struct(frame, frame_addr, 1);
     return;
@@ -XXX,XX +XXX,XX @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
     regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
     regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
     regs->pc = (unsigned long) ka->_sa_handler;
-    regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
+    regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
 
     unlock_user_struct(frame, frame_addr, 1);
     return;
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
     SuperHCPU *cpu = SUPERH_CPU(cs);
 
     cpu->env.pc = tb_pc(tb);
-    cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
+    cpu->env.flags = tb->flags;
 }
 
 #ifndef CONFIG_USER_ONLY
@@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
     SuperHCPU *cpu = SUPERH_CPU(cs);
     CPUSH4State *env = &cpu->env;
 
-    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
+    if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND))
         && env->pc != tb_pc(tb)) {
         env->pc -= 2;
-        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+        env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND);
         return true;
     }
     return false;
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -XXX,XX +XXX,XX @@ void superh_cpu_do_interrupt(CPUState *cs)
     env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
     env->lock_addr = -1;
 
-    if (env->flags & DELAY_SLOT_MASK) {
+    if (env->flags & TB_FLAG_DELAY_SLOT_MASK) {
         /* Branch instruction should be executed again before delay slot. */
         env->spc -= 2;
         /* Clear flags for exception/interrupt routine. */
-        env->flags &= ~DELAY_SLOT_MASK;
+        env->flags &= ~TB_FLAG_DELAY_SLOT_MASK;
     }
 
     if (do_exp) {
@@ -XXX,XX +XXX,XX @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     CPUSH4State *env = &cpu->env;
 
     /* Delay slots are indivisible, ignore interrupts */
-    if (env->flags & DELAY_SLOT_MASK) {
+    if (env->flags & TB_FLAG_DELAY_SLOT_MASK) {
         return false;
     } else {
         superh_cpu_do_interrupt(cs);
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                     i, env->gregs[i], i + 1, env->gregs[i + 1],
                     i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
     }
-    if (env->flags & DELAY_SLOT) {
+    if (env->flags & TB_FLAG_DELAY_SLOT) {
         qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
-    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
+    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
         qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
-    } else if (env->flags & DELAY_SLOT_RTE) {
+    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
         qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                      env->delayed_pc);
     }
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
 
 static inline bool use_exit_tb(DisasContext *ctx)
 {
-    return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
+    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
 }
 
 static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
@@ -XXX,XX +XXX,XX @@ static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
     TCGLabel *l1 = gen_new_label();
     TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
 
-    if (ctx->tbflags & GUSA_EXCLUSIVE) {
+    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
         /* When in an exclusive region, we must continue to the end.
            Therefore, exit the region on a taken branch, but otherwise
            fall through to the next instruction. */
         tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
-        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
+        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
         /* Note that this won't actually use a goto_tb opcode because we
            disallow it in use_goto_tb, but it handles exit + singlestep. */
         gen_goto_tb(ctx, 0, dest);
@@ -XXX,XX +XXX,XX @@ static void gen_delayed_conditional_jump(DisasContext * ctx)
     tcg_gen_mov_i32(ds, cpu_delayed_cond);
     tcg_gen_discard_i32(cpu_delayed_cond);
 
-    if (ctx->tbflags & GUSA_EXCLUSIVE) {
+    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
         /* When in an exclusive region, we must continue to the end.
            Therefore, exit the region on a taken branch, but otherwise
            fall through to the next instruction. */
         tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
 
         /* Leave the gUSA region. */
-        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
+        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
         gen_jump(ctx);
 
         gen_set_label(l1);
@@ -XXX,XX +XXX,XX @@ static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
 
 #define CHECK_NOT_DELAY_SLOT \
-    if (ctx->envflags & DELAY_SLOT_MASK) {  \
-        goto do_illegal_slot;               \
+    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \
+        goto do_illegal_slot;                      \
     }
 
 #define CHECK_PRIVILEGED \
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
     case 0x000b:        /* rts */
         CHECK_NOT_DELAY_SLOT
         tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         ctx->delayed_pc = (uint32_t) - 1;
         return;
     case 0x0028:        /* clrmac */
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
         CHECK_NOT_DELAY_SLOT
         gen_write_sr(cpu_ssr);
         tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
-        ctx->envflags |= DELAY_SLOT_RTE;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
         ctx->delayed_pc = (uint32_t) - 1;
         ctx->base.is_jmp = DISAS_STOP;
         return;
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
         return;
     case 0xe000:        /* mov #imm,Rn */
 #ifdef CONFIG_USER_ONLY
-        /* Detect the start of a gUSA region. If so, update envflags
-           and end the TB. This will allow us to see the end of the
-           region (stored in R0) in the next TB. */
+        /*
+         * Detect the start of a gUSA region (mov #-n, r15).
+         * If so, update envflags and end the TB. This will allow us
+         * to see the end of the region (stored in R0) in the next TB.
+         */
         if (B11_8 == 15 && B7_0s < 0 &&
             (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
-            ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
+            ctx->envflags =
+                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
             ctx->base.is_jmp = DISAS_STOP;
         }
 #endif
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
     case 0xa000:        /* bra disp */
         CHECK_NOT_DELAY_SLOT
         ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         return;
     case 0xb000:        /* bsr disp */
         CHECK_NOT_DELAY_SLOT
         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
         ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
         CHECK_NOT_DELAY_SLOT
         tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
         ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
-        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
         return;
     case 0x8900:        /* bt label */
         CHECK_NOT_DELAY_SLOT
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
         CHECK_NOT_DELAY_SLOT
         tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
         ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
-        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
         return;
     case 0x8800:        /* cmp/eq #imm,R0 */
         tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
     case 0x0023:        /* braf Rn */
         CHECK_NOT_DELAY_SLOT
         tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         ctx->delayed_pc = (uint32_t) - 1;
         return;
     case 0x0003:        /* bsrf Rn */
         CHECK_NOT_DELAY_SLOT
         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
         tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         ctx->delayed_pc = (uint32_t) - 1;
         return;
     case 0x4015:        /* cmp/pl Rn */
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
     case 0x402b:        /* jmp @Rn */
         CHECK_NOT_DELAY_SLOT
         tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         ctx->delayed_pc = (uint32_t) - 1;
         return;
     case 0x400b:        /* jsr @Rn */
         CHECK_NOT_DELAY_SLOT
         tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
         tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
-        ctx->envflags |= DELAY_SLOT;
+        ctx->envflags |= TB_FLAG_DELAY_SLOT;
         ctx->delayed_pc = (uint32_t) - 1;
         return;
     case 0x400e:        /* ldc Rm,SR */
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
     fflush(stderr);
 #endif
  do_illegal:
-    if (ctx->envflags & DELAY_SLOT_MASK) {
+    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
  do_illegal_slot:
         gen_save_cpu_state(ctx, true);
         gen_helper_raise_slot_illegal_instruction(cpu_env);
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
 
  do_fpu_disabled:
     gen_save_cpu_state(ctx, true);
-    if (ctx->envflags & DELAY_SLOT_MASK) {
+    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
         gen_helper_raise_slot_fpu_disable(cpu_env);
     } else {
         gen_helper_raise_fpu_disable(cpu_env);
@@ -XXX,XX +XXX,XX @@ static void decode_opc(DisasContext * ctx)
 
     _decode_opc(ctx);
 
-    if (old_flags & DELAY_SLOT_MASK) {
+    if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
         /* go out of the delay slot */
-        ctx->envflags &= ~DELAY_SLOT_MASK;
+        ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
 
         /* When in an exclusive region, we must continue to the end
            for conditional branches. */
-        if (ctx->tbflags & GUSA_EXCLUSIVE
-            && old_flags & DELAY_SLOT_CONDITIONAL) {
+        if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
+            && old_flags & TB_FLAG_DELAY_SLOT_COND) {
             gen_delayed_conditional_jump(ctx);
             return;
         }
         /* Otherwise this is probably an invalid gUSA region.
            Drop the GUSA bits so the next TB doesn't see them. */
-        ctx->envflags &= ~GUSA_MASK;
+        ctx->envflags &= ~TB_FLAG_GUSA_MASK;
 
         tcg_gen_movi_i32(cpu_flags, ctx->envflags);
-        if (old_flags & DELAY_SLOT_CONDITIONAL) {
+        if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
             gen_delayed_conditional_jump(ctx);
         } else {
             gen_jump(ctx);
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
     }
 
     /* The entire region has been translated. */
-    ctx->envflags &= ~GUSA_MASK;
+    ctx->envflags &= ~TB_FLAG_GUSA_MASK;
     ctx->base.pc_next = pc_end;
     ctx->base.num_insns += max_insns - 1;
     return;
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
 
     /* Restart with the EXCLUSIVE bit set, within a TB run via
        cpu_exec_step_atomic holding the exclusive lock. */
-    ctx->envflags |= GUSA_EXCLUSIVE;
+    ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
     gen_save_cpu_state(ctx, false);
     gen_helper_exclusive(cpu_env);
     ctx->base.is_jmp = DISAS_NORETURN;
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
                   (tbflags & (1 << SR_RB))) * 0x10;
     ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
 
-    if (tbflags & GUSA_MASK) {
+#ifdef CONFIG_USER_ONLY
+    if (tbflags & TB_FLAG_GUSA_MASK) {
+        /* In gUSA exclusive region. */
         uint32_t pc = ctx->base.pc_next;
         uint32_t pc_end = ctx->base.tb->cs_base;
-        int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
+        int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
         int max_insns = (pc_end - pc) / 2;
 
         if (pc != pc_end + backup || max_insns < 2) {
             /* This is a malformed gUSA region. Don't do anything special,
                since the interpreter is likely to get confused. */
-            ctx->envflags &= ~GUSA_MASK;
-        } else if (tbflags & GUSA_EXCLUSIVE) {
+            ctx->envflags &= ~TB_FLAG_GUSA_MASK;
+        } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
             /* Regardless of single-stepping or the end of the page,
                we must complete execution of the gUSA region while
                holding the exclusive lock. */
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
             return;
         }
     }
+#endif
 
     /* Since the ISA is fixed-width, we can bound by the number
        of instructions remaining on the page. */
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
 #ifdef CONFIG_USER_ONLY
-    if (unlikely(ctx->envflags & GUSA_MASK)
-        && !(ctx->envflags & GUSA_EXCLUSIVE)) {
+    if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
+        && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
         /* We're in an gUSA region, and we have not already fallen
            back on using an exclusive region. Attempt to parse the
            region into a single supported atomic operation. Failure
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
 
-    if (ctx->tbflags & GUSA_EXCLUSIVE) {
+    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
         /* Ending the region of exclusivity. Clear the bits. */
-        ctx->envflags &= ~GUSA_MASK;
+        ctx->envflags &= ~TB_FLAG_GUSA_MASK;
     }
 
     switch (ctx->base.is_jmp) {
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-5-richard.henderson@linaro.org>
---
 target/m68k/translate.c | 76 ++++++++++++++---------------------------
 1 file changed, 25 insertions(+), 51 deletions(-)

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static inline void gen_addr_fault(DisasContext *s)
 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                             int sign, int index)
 {
-    TCGv tmp;
-    tmp = tcg_temp_new_i32();
-    switch(opsize) {
+    TCGv tmp = tcg_temp_new_i32();
+
+    switch (opsize) {
     case OS_BYTE:
-        if (sign)
-            tcg_gen_qemu_ld8s(tmp, addr, index);
-        else
-            tcg_gen_qemu_ld8u(tmp, addr, index);
-        break;
     case OS_WORD:
-        if (sign)
-            tcg_gen_qemu_ld16s(tmp, addr, index);
-        else
-            tcg_gen_qemu_ld16u(tmp, addr, index);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index,
+                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                              int index)
 {
-    switch(opsize) {
+    switch (opsize) {
     case OS_BYTE:
-        tcg_gen_qemu_st8(val, addr, index);
-        break;
     case OS_WORD:
-        tcg_gen_qemu_st16(val, addr, index);
-        break;
     case OS_LONG:
-        tcg_gen_qemu_st32(val, addr, index);
+        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     tmp = tcg_temp_new();
     switch (opsize) {
     case OS_BYTE:
-        tcg_gen_qemu_ld8s(tmp, addr, index);
-        gen_helper_exts32(cpu_env, fp, tmp);
-        break;
     case OS_WORD:
-        tcg_gen_qemu_ld16s(tmp, addr, index);
-        gen_helper_exts32(cpu_env, fp, tmp);
-        break;
-    case OS_LONG:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+    case OS_LONG:
+        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
         gen_helper_exts32(cpu_env, fp, tmp);
         break;
     case OS_SINGLE:
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
         gen_helper_extf32(cpu_env, fp, tmp);
         break;
     case OS_DOUBLE:
-        tcg_gen_qemu_ld64(t64, addr, index);
+        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
         gen_helper_extf64(cpu_env, fp, t64);
         break;
     case OS_EXTENDED:
@@ -XXX,XX +XXX,XX @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
             gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
             break;
         }
-        tcg_gen_qemu_ld32u(tmp, addr, index);
+        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
         tcg_gen_shri_i32(tmp, tmp, 16);
         tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
         tcg_gen_addi_i32(tmp, addr, 4);
-        tcg_gen_qemu_ld64(t64, tmp, index);
+        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
         tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
         break;
     case OS_PACKED:
@@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
     tmp = tcg_temp_new();
     switch (opsize) {
     case OS_BYTE:
-        gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st8(tmp, addr, index);
-        break;
     case OS_WORD:
-        gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st16(tmp, addr, index);
-        break;
     case OS_LONG:
         gen_helper_reds32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
         break;
     case OS_SINGLE:
         gen_helper_redf32(tmp, cpu_env, fp);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
         break;
     case OS_DOUBLE:
         gen_helper_redf64(t64, cpu_env, fp);
-        tcg_gen_qemu_st64(t64, addr, index);
+        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
         break;
     case OS_EXTENDED:
         if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
@@ -XXX,XX +XXX,XX @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
         }
         tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
         tcg_gen_shli_i32(tmp, tmp, 16);
-        tcg_gen_qemu_st32(tmp, addr, index);
+        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
         tcg_gen_addi_i32(tmp, addr, 4);
         tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
-        tcg_gen_qemu_st64(t64, tmp, index);
+        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
         break;
     case OS_PACKED:
         /*
@@ -XXX,XX +XXX,XX @@ DISAS_INSN(movep)
     if (insn & 0x80) {
         for ( ; i > 0 ; i--) {
             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
-            tcg_gen_qemu_st8(dbuf, abuf, IS_USER(s));
+            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
             if (i > 1) {
                 tcg_gen_addi_i32(abuf, abuf, 2);
             }
         }
     } else {
         for ( ; i > 0 ; i--) {
-            tcg_gen_qemu_ld8u(dbuf, abuf, IS_USER(s));
+            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
             if (i > 1) {
                 tcg_gen_addi_i32(abuf, abuf, 2);
@@ -XXX,XX +XXX,XX @@ static void m68k_copy_line(TCGv dst, TCGv src, int index)
     t1 = tcg_temp_new_i64();
 
     tcg_gen_andi_i32(addr, src, ~15);
-    tcg_gen_qemu_ld64(t0, addr, index);
+    tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
     tcg_gen_addi_i32(addr, addr, 8);
-    tcg_gen_qemu_ld64(t1, addr, index);
+    tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
 
     tcg_gen_andi_i32(addr, dst, ~15);
-    tcg_gen_qemu_st64(t0, addr, index);
+    tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
     tcg_gen_addi_i32(addr, addr, 8);
-    tcg_gen_qemu_st64(t1, addr, index);
+    tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
 }
 
 DISAS_INSN(move16_reg)
@@ -XXX,XX +XXX,XX @@ static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
 
     tmp = tcg_temp_new();
     gen_load_fcr(s, tmp, reg);
-    tcg_gen_qemu_st32(tmp, addr, index);
+    tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
 }
 
 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
@@ -XXX,XX +XXX,XX @@ static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
     TCGv tmp;
 
     tmp = tcg_temp_new();
-    tcg_gen_qemu_ld32u(tmp, addr, index);
+    tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
     gen_store_fcr(s, tmp, reg);
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument.
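
For instance, after this change OP_LD_ATOMIC(ll, MO_TESL) expands to
the following (hand-expanded from the macro in the hunk below, shown
for illustration; it assumes the surrounding translate.c context):

    static inline void op_ld_ll(TCGv ret, TCGv arg1, int mem_idx,
                                DisasContext *ctx)
    {
        TCGv t0 = tcg_temp_new();
        tcg_gen_mov_tl(t0, arg1);
        tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, MO_TESL);
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr));
        tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval));
    }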

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-6-richard.henderson@linaro.org>
---
 target/mips/tcg/translate.c              | 8 ++++----
 target/mips/tcg/nanomips_translate.c.inc | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
 
 /* load/store instructions. */
 #ifdef CONFIG_USER_ONLY
-#define OP_LD_ATOMIC(insn, fname) \
+#define OP_LD_ATOMIC(insn, memop) \
 static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
                                 DisasContext *ctx) \
 { \
     TCGv t0 = tcg_temp_new(); \
     tcg_gen_mov_tl(t0, arg1); \
-    tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
+    tcg_gen_qemu_ld_tl(ret, arg1, ctx->mem_idx, memop); \
     tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
     tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
 }
@@ -XXX,XX +XXX,XX @@ static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
     gen_helper_##insn(ret, cpu_env, arg1, tcg_constant_i32(mem_idx)); \
 }
 #endif
-OP_LD_ATOMIC(ll, ld32s);
+OP_LD_ATOMIC(ll, MO_TESL);
 #if defined(TARGET_MIPS64)
-OP_LD_ATOMIC(lld, ld64);
+OP_LD_ATOMIC(lld, MO_TEUQ);
 #endif
 #undef OP_LD_ATOMIC
 
diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/nanomips_translate.c.inc
+++ b/target/mips/tcg/nanomips_translate.c.inc
@@ -XXX,XX +XXX,XX @@ static void gen_llwp(DisasContext *ctx, uint32_t base, int16_t offset,
     TCGv tmp2 = tcg_temp_new();
 
     gen_base_offset_addr(ctx, taddr, base, offset);
-    tcg_gen_qemu_ld64(tval, taddr, ctx->mem_idx);
+    tcg_gen_qemu_ld_i64(tval, taddr, ctx->mem_idx, MO_TEUQ);
     if (cpu_is_bigendian(ctx)) {
         tcg_gen_extr_i64_tl(tmp2, tmp1, tval);
     } else {
--
2.34.1

Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-Id: <20230502135741.1158035-7-richard.henderson@linaro.org>
---
 target/s390x/tcg/translate.c | 152 ++++++++++++++++-------------
 1 file changed, 71 insertions(+), 81 deletions(-)

diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
 {
     int l = get_field(s, l1);
     TCGv_i32 vl;
+    MemOp mop;
 
     switch (l + 1) {
     case 1:
-        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
-        break;
     case 2:
-        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
-        break;
     case 4:
-        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
-        break;
     case 8:
-        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
-        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
-        break;
+        mop = ctz32(l + 1) | MO_TE;
+        tcg_gen_qemu_ld_tl(cc_src, o->addr1, get_mem_index(s), mop);
+        tcg_gen_qemu_ld_tl(cc_dst, o->in2, get_mem_index(s), mop);
+        gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+        return DISAS_NEXT;
     default:
         vl = tcg_constant_i32(l);
         gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
         set_cc_static(s);
         return DISAS_NEXT;
     }
-    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
-    return DISAS_NEXT;
 }
 
 static DisasJumpType op_clcl(DisasContext *s, DisasOps *o)
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
     TCGv_i32 t2 = tcg_temp_new_i32();
     tcg_gen_extrl_i64_i32(t2, o->in1);
     gen_helper_cvd(t1, t2);
-    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_st_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
     return DISAS_NEXT;
 }
 
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
     switch (m3) {
     case 0xf:
         /* Effectively a 32-bit load. */
-        tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
         len = 32;
         goto one_insert;
 
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
     case 0x6:
     case 0x3:
         /* Effectively a 16-bit load. */
-        tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
         len = 16;
         goto one_insert;
 
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
     case 0x2:
     case 0x1:
         /* Effectively an 8-bit load. */
-        tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
         len = 8;
         goto one_insert;
 
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
         ccm = 0;
         while (m3) {
             if (m3 & 0x8) {
-                tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+                tcg_gen_qemu_ld_i64(tmp, o->in2, get_mem_index(s), MO_UB);
                 tcg_gen_addi_i64(o->in2, o->in2, 1);
                 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
                 ccm |= 0xffull << pos;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
 
 static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_SB);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_UB);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TESW);
     return DISAS_NEXT;
 }
 
 static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
 {
-    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUW);
     return DISAS_NEXT;
 }
 
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
 static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
 {
     TCGLabel *lab = gen_new_label();
-    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUQ);
     /* The value is stored even in case of trap. */
     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
     gen_trap(s);
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
 static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
 {
     TCGLabel *lab = gen_new_label();
-    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+
+    tcg_gen_qemu_ld_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
     /* The value is stored even in case of trap. */
     tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
     gen_trap(s);
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
     tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                         MO_TEUQ | MO_ALIGN_8);
     tcg_gen_addi_i64(o->in2, o->in2, 8);
-    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t2, o->in2, get_mem_index(s), MO_TEUQ);
     gen_helper_load_psw(cpu_env, t1, t2);
     return DISAS_NORETURN;
 }
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
     /* Only one register to read. */
     t1 = tcg_temp_new_i64();
     if (unlikely(r1 == r3)) {
-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
         store_reg32_i64(r1, t1);
         return DISAS_NEXT;
     }
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
     /* First load the values of the first and last registers to trigger
        possible page faults. */
     t2 = tcg_temp_new_i64();
-    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
     tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
-    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+    tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
     store_reg32_i64(r1, t1);
     store_reg32_i64(r3, t2);
 
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
     while (r1 != r3) {
         r1 = (r1 + 1) & 15;
         tcg_gen_add_i64(o->in2, o->in2, t2);
-        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+        tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
         store_reg32_i64(r1, t1);
     }
     return DISAS_NEXT;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
     /* Only one register to read. */
     t1 = tcg_temp_new_i64();
     if (unlikely(r1 == r3)) {
192
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
193
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
194
store_reg32h_i64(r1, t1);
195
return DISAS_NEXT;
196
}
197
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
198
/* First load the values of the first and last registers to trigger
199
possible page faults. */
200
t2 = tcg_temp_new_i64();
201
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
202
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
203
tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
204
- tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
205
+ tcg_gen_qemu_ld_i64(t2, t2, get_mem_index(s), MO_TEUL);
206
store_reg32h_i64(r1, t1);
207
store_reg32h_i64(r3, t2);
208
209
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
210
while (r1 != r3) {
211
r1 = (r1 + 1) & 15;
212
tcg_gen_add_i64(o->in2, o->in2, t2);
213
- tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
214
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUL);
215
store_reg32h_i64(r1, t1);
216
}
217
return DISAS_NEXT;
218
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
219
220
/* Only one register to read. */
221
if (unlikely(r1 == r3)) {
222
- tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
223
+ tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
224
return DISAS_NEXT;
225
}
226
227
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
228
possible page faults. */
229
t1 = tcg_temp_new_i64();
230
t2 = tcg_temp_new_i64();
231
- tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
232
+ tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), MO_TEUQ);
233
tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
234
- tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
235
+ tcg_gen_qemu_ld_i64(regs[r3], t2, get_mem_index(s), MO_TEUQ);
236
tcg_gen_mov_i64(regs[r1], t1);
237
238
/* Only two registers to read. */
239
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
240
while (r1 != r3) {
241
r1 = (r1 + 1) & 15;
242
tcg_gen_add_i64(o->in2, o->in2, t1);
243
- tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
244
+ tcg_gen_qemu_ld_i64(regs[r1], o->in2, get_mem_index(s), MO_TEUQ);
245
}
246
return DISAS_NEXT;
247
}
248
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
249
a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
250
switch (s->insn->data) {
251
case 1: /* STOCG */
252
- tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
253
+ tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUQ);
254
break;
255
case 0: /* STOC */
256
- tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
257
+ tcg_gen_qemu_st_i64(regs[r1], a, get_mem_index(s), MO_TEUL);
258
break;
259
case 2: /* STOCFH */
260
h = tcg_temp_new_i64();
261
tcg_gen_shri_i64(h, regs[r1], 32);
262
- tcg_gen_qemu_st32(h, a, get_mem_index(s));
263
+ tcg_gen_qemu_st_i64(h, a, get_mem_index(s), MO_TEUL);
264
break;
265
default:
266
g_assert_not_reached();
267
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
268
gen_addi_and_wrap_i64(s, o->addr1, regs[r3], 0);
269
270
/* load the third operand into r3 before modifying anything */
271
- tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
272
+ tcg_gen_qemu_ld_i64(regs[r3], o->addr1, get_mem_index(s), MO_TEUQ);
273
274
/* subtract CPU timer from first operand and store in GR0 */
275
gen_helper_stpt(tmp, cpu_env);
276
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
277
tcg_gen_shri_i64(c1, c1, 8);
278
tcg_gen_ori_i64(c2, c2, 0x10000);
279
tcg_gen_or_i64(c2, c2, todpr);
280
- tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
281
+ tcg_gen_qemu_st_i64(c1, o->in2, get_mem_index(s), MO_TEUQ);
282
tcg_gen_addi_i64(o->in2, o->in2, 8);
283
- tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
284
+ tcg_gen_qemu_st_i64(c2, o->in2, get_mem_index(s), MO_TEUQ);
285
/* ??? We don't implement clock states. */
286
gen_op_movi_cc(s, 0);
287
return DISAS_NEXT;
288
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
289
restart, we'll have the wrong SYSTEM MASK in place. */
290
t = tcg_temp_new_i64();
291
tcg_gen_shri_i64(t, psw_mask, 56);
292
- tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
293
+ tcg_gen_qemu_st_i64(t, o->addr1, get_mem_index(s), MO_UB);
294
295
if (s->fields.op == 0xac) {
296
tcg_gen_andi_i64(psw_mask, psw_mask,
297
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
298
299
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
300
{
301
- tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
302
+ tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_UB);
303
return DISAS_NEXT;
304
}
305
306
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
307
{
308
- tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
309
+ tcg_gen_qemu_st_i64(o->in1, o->in2, get_mem_index(s), MO_TEUW);
310
return DISAS_NEXT;
311
}
312
313
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
314
case 0xf:
315
/* Effectively a 32-bit store. */
316
tcg_gen_shri_i64(tmp, o->in1, pos);
317
- tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
318
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUL);
319
break;
320
321
case 0xc:
322
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
323
case 0x3:
324
/* Effectively a 16-bit store. */
325
tcg_gen_shri_i64(tmp, o->in1, pos);
326
- tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
327
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_TEUW);
328
break;
329
330
case 0x8:
331
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
332
case 0x1:
333
/* Effectively an 8-bit store. */
334
tcg_gen_shri_i64(tmp, o->in1, pos);
335
- tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
336
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
337
break;
338
339
default:
340
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
341
while (m3) {
342
if (m3 & 0x8) {
343
tcg_gen_shri_i64(tmp, o->in1, pos);
344
- tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
345
+ tcg_gen_qemu_st_i64(tmp, o->in2, get_mem_index(s), MO_UB);
346
tcg_gen_addi_i64(o->in2, o->in2, 1);
347
}
348
m3 = (m3 << 1) & 0xf;
349
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
350
TCGv_i64 tsize = tcg_constant_i64(size);
351
352
while (1) {
353
- if (size == 8) {
354
- tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
355
- } else {
356
- tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
357
- }
358
+ tcg_gen_qemu_st_i64(regs[r1], o->in2, get_mem_index(s),
359
+ size == 8 ? MO_TEUQ : MO_TEUL);
360
if (r1 == r3) {
361
break;
362
}
363
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
364
365
while (1) {
366
tcg_gen_shl_i64(t, regs[r1], t32);
367
- tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
368
+ tcg_gen_qemu_st_i64(t, o->in2, get_mem_index(s), MO_TEUL);
369
if (r1 == r3) {
370
break;
371
}
372
@@ -XXX,XX +XXX,XX @@ static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
373
374
l++;
375
while (l >= 8) {
376
- tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
377
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UQ);
378
l -= 8;
379
if (l > 0) {
380
tcg_gen_addi_i64(o->addr1, o->addr1, 8);
381
}
382
}
383
if (l >= 4) {
384
- tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
385
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UL);
386
l -= 4;
387
if (l > 0) {
388
tcg_gen_addi_i64(o->addr1, o->addr1, 4);
389
}
390
}
391
if (l >= 2) {
392
- tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
393
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UW);
394
l -= 2;
395
if (l > 0) {
396
tcg_gen_addi_i64(o->addr1, o->addr1, 2);
397
}
398
}
399
if (l) {
400
- tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
401
+ tcg_gen_qemu_st_i64(o->in2, o->addr1, get_mem_index(s), MO_UB);
402
}
403
gen_op_movi_cc(s, 0);
404
return DISAS_NEXT;
405
@@ -XXX,XX +XXX,XX @@ static void wout_cond_e1e2(DisasContext *s, DisasOps *o)
406
407
static void wout_m1_8(DisasContext *s, DisasOps *o)
408
{
409
- tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
410
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_UB);
411
}
412
#define SPEC_wout_m1_8 0
413
414
static void wout_m1_16(DisasContext *s, DisasOps *o)
415
{
416
- tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
417
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUW);
418
}
419
#define SPEC_wout_m1_16 0
420
421
@@ -XXX,XX +XXX,XX @@ static void wout_m1_16a(DisasContext *s, DisasOps *o)
422
423
static void wout_m1_32(DisasContext *s, DisasOps *o)
424
{
425
- tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
426
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUL);
427
}
428
#define SPEC_wout_m1_32 0
429
430
@@ -XXX,XX +XXX,XX @@ static void wout_m1_32a(DisasContext *s, DisasOps *o)
431
432
static void wout_m1_64(DisasContext *s, DisasOps *o)
433
{
434
- tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
435
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ);
436
}
437
#define SPEC_wout_m1_64 0
438
439
@@ -XXX,XX +XXX,XX @@ static void wout_m1_64a(DisasContext *s, DisasOps *o)
440
441
static void wout_m2_32(DisasContext *s, DisasOps *o)
442
{
443
- tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
444
+ tcg_gen_qemu_st_i64(o->out, o->in2, get_mem_index(s), MO_TEUL);
445
}
446
#define SPEC_wout_m2_32 0
447
448
@@ -XXX,XX +XXX,XX @@ static void in1_m1_8u(DisasContext *s, DisasOps *o)
449
{
450
in1_la1(s, o);
451
o->in1 = tcg_temp_new_i64();
452
- tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
453
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_UB);
454
}
455
#define SPEC_in1_m1_8u 0
456
457
@@ -XXX,XX +XXX,XX @@ static void in1_m1_16s(DisasContext *s, DisasOps *o)
458
{
459
in1_la1(s, o);
460
o->in1 = tcg_temp_new_i64();
461
- tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
462
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESW);
463
}
464
#define SPEC_in1_m1_16s 0
465
466
@@ -XXX,XX +XXX,XX @@ static void in1_m1_16u(DisasContext *s, DisasOps *o)
467
{
468
in1_la1(s, o);
469
o->in1 = tcg_temp_new_i64();
470
- tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
471
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUW);
472
}
473
#define SPEC_in1_m1_16u 0
474
475
@@ -XXX,XX +XXX,XX @@ static void in1_m1_32s(DisasContext *s, DisasOps *o)
476
{
477
in1_la1(s, o);
478
o->in1 = tcg_temp_new_i64();
479
- tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
480
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TESL);
481
}
482
#define SPEC_in1_m1_32s 0
483
484
@@ -XXX,XX +XXX,XX @@ static void in1_m1_32u(DisasContext *s, DisasOps *o)
485
{
486
in1_la1(s, o);
487
o->in1 = tcg_temp_new_i64();
488
- tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
489
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUL);
490
}
491
#define SPEC_in1_m1_32u 0
492
493
@@ -XXX,XX +XXX,XX @@ static void in1_m1_64(DisasContext *s, DisasOps *o)
494
{
495
in1_la1(s, o);
496
o->in1 = tcg_temp_new_i64();
497
- tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
498
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ);
499
}
500
#define SPEC_in1_m1_64 0
501
502
@@ -XXX,XX +XXX,XX @@ static void in2_sh(DisasContext *s, DisasOps *o)
503
static void in2_m2_8u(DisasContext *s, DisasOps *o)
504
{
505
in2_a2(s, o);
506
- tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
507
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_UB);
508
}
509
#define SPEC_in2_m2_8u 0
510
511
static void in2_m2_16s(DisasContext *s, DisasOps *o)
512
{
513
in2_a2(s, o);
514
- tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
515
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESW);
516
}
517
#define SPEC_in2_m2_16s 0
518
519
static void in2_m2_16u(DisasContext *s, DisasOps *o)
520
{
521
in2_a2(s, o);
522
- tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
523
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUW);
524
}
525
#define SPEC_in2_m2_16u 0
526
527
static void in2_m2_32s(DisasContext *s, DisasOps *o)
528
{
529
in2_a2(s, o);
530
- tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
531
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TESL);
532
}
533
#define SPEC_in2_m2_32s 0
534
535
static void in2_m2_32u(DisasContext *s, DisasOps *o)
536
{
537
in2_a2(s, o);
538
- tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
539
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUL);
540
}
541
#define SPEC_in2_m2_32u 0
542
543
@@ -XXX,XX +XXX,XX @@ static void in2_m2_32ua(DisasContext *s, DisasOps *o)
544
static void in2_m2_64(DisasContext *s, DisasOps *o)
545
{
546
in2_a2(s, o);
547
- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
548
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
549
}
550
#define SPEC_in2_m2_64 0
551
552
static void in2_m2_64w(DisasContext *s, DisasOps *o)
553
{
554
in2_a2(s, o);
555
- tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
556
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ);
557
gen_addi_and_wrap_i64(s, o->in2, o->in2, 0);
558
}
559
#define SPEC_in2_m2_64w 0
560
@@ -XXX,XX +XXX,XX @@ static void in2_m2_64a(DisasContext *s, DisasOps *o)
561
static void in2_mri2_16s(DisasContext *s, DisasOps *o)
562
{
563
o->in2 = tcg_temp_new_i64();
564
- tcg_gen_qemu_ld16s(o->in2, gen_ri2(s), get_mem_index(s));
565
+ tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TESW);
566
}
567
#define SPEC_in2_mri2_16s 0
568
569
static void in2_mri2_16u(DisasContext *s, DisasOps *o)
570
{
571
o->in2 = tcg_temp_new_i64();
572
- tcg_gen_qemu_ld16u(o->in2, gen_ri2(s), get_mem_index(s));
573
+ tcg_gen_qemu_ld_i64(o->in2, gen_ri2(s), get_mem_index(s), MO_TEUW);
574
}
575
#define SPEC_in2_mri2_16u 0
576
577
--
578
2.34.1
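A note on the op_clc hunk above: QEMU encodes the MemOp size as log2 of the
byte count, so ctz32(l + 1) maps the CLC lengths 1/2/4/8 straight onto
MO_8/MO_16/MO_32/MO_64 before MO_TE adds target endianness. A standalone
sketch of that mapping (not part of the series; ctz32 here stands in for
QEMU's helper of the same name):

    #include <stdio.h>

    static int ctz32(unsigned v) { return __builtin_ctz(v); }

    int main(void)
    {
        /* CLC operand lengths 1, 2, 4, 8 -> MemOp size codes 0..3. */
        for (unsigned len = 1; len <= 8; len <<= 1) {
            printf("l + 1 = %u -> size code %d\n", len, ctz32(len));
        }
        return 0;
    }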
New patch
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Anton Johansson <anjo@rev.ng>
Message-Id: <20230502135741.1158035-8-richard.henderson@linaro.org>
---
target/sparc/translate.c | 43 ++++++++++++++++++++++++++--------------
1 file changed, 28 insertions(+), 15 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
switch (xop) {
case 0x0: /* ld, V9 lduw, load unsigned word */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUL);
break;
case 0x1: /* ldub, load unsigned byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_UB);
break;
case 0x2: /* lduh, load unsigned halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUW);
break;
case 0x3: /* ldd, load double word */
if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)

gen_address_mask(dc, cpu_addr);
t64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
tcg_gen_trunc_i64_tl(cpu_val, t64);
tcg_gen_ext32u_tl(cpu_val, cpu_val);
gen_store_gpr(dc, rd + 1, cpu_val);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
break;
case 0x9: /* ldsb, load signed byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
break;
case 0xa: /* ldsh, load signed halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TESW);
break;
case 0xd: /* ldstub */
gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#ifdef TARGET_SPARC64
case 0x08: /* V9 ldsw */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TESL);
break;
case 0x0b: /* V9 ldx */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
break;
case 0x18: /* V9 ldswa */
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
switch (xop) {
case 0x4: /* st, store word */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUL);
break;
case 0x5: /* stb, store byte */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
break;
case 0x6: /* sth, store halfword */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUW);
break;
case 0x7: /* std, store double word */
if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
lo = gen_load_gpr(dc, rd + 1);
t64 = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t64, lo, cpu_val);
- tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#ifdef TARGET_SPARC64
case 0x0e: /* V9 stx */
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
break;
case 0x1e: /* V9 stxa */
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
- tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+ dc->mem_idx, MO_TEUQ);
break;
}
#endif
- tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
+ dc->mem_idx, MO_TEUL);
}
break;
case 0x26:
--
2.34.1
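For readers following these conversions: the MemOp that replaces each legacy
helper is simply the composition of size, signedness and endianness flags.
Illustrative identities, matching the definitions in include/exec/memop.h:

    MemOp ld16s = MO_TE | MO_16 | MO_SIGN;  /* == MO_TESW, was tcg_gen_qemu_ld16s */
    MemOp ld32u = MO_TE | MO_32;            /* == MO_TEUL, was tcg_gen_qemu_ld32u */
    MemOp ld64  = MO_TE | MO_64;            /* == MO_TEUQ, was tcg_gen_qemu_ld64 */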
New patch
Convert away from the old interface with the implicit
MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
Message-Id: <20230502135741.1158035-9-richard.henderson@linaro.org>
---
target/xtensa/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
TCGv_i32 res = tcg_temp_new_i32();

tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
- tcg_gen_qemu_ld8u(res, addr, dc->cring);
+ tcg_gen_qemu_ld_i32(res, addr, dc->cring, MO_UB);
}

static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
@@ -XXX,XX +XXX,XX @@ static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
} else {
tmp = tcg_constant_i32(arg[1].imm);
}
- tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
+ tcg_gen_qemu_ld_i32(arg[0].out, tmp, dc->cring, MO_TEUL);
}

static void translate_loop(DisasContext *dc, const OpcodeArg arg[],
--
2.34.1
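As the xtensa and sparc patches show, the explicit-MemOp API comes in three
flavors, chosen by the width of the value operand. A hedged sketch (variable
names are illustrative):

    tcg_gen_qemu_ld_i32(v32, addr, mem_idx, MO_UB);    /* TCGv_i32 value */
    tcg_gen_qemu_ld_i64(v64, addr, mem_idx, MO_TEUQ);  /* TCGv_i64 value */
    tcg_gen_qemu_ld_tl(vtl, addr, mem_idx, MO_TEUW);   /* target-long TCGv */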
New patch
Remove the old interfaces with the implicit MemOp argument.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Acked-by: David Hildenbrand <david@redhat.com>
Message-Id: <20230502135741.1158035-10-richard.henderson@linaro.org>
---
include/tcg/tcg-op.h | 55 --------------------------------------------
1 file changed, 55 deletions(-)

diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_ld_i128(TCGv_i128, TCGv, TCGArg, MemOp);
void tcg_gen_qemu_st_i128(TCGv_i128, TCGv, TCGArg, MemOp);

-static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
-}
-
-static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
-}
-
-static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
-}
-
-static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
-}
-
-static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
-}
-
-static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
-}
-
-static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
-}
-
-static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
-{
- tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
-}
-
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
TCGArg, MemOp);
void tcg_gen_atomic_cmpxchg_i64(TCGv_i64, TCGv, TCGv_i64, TCGv_i64,
--
2.34.1
New patch
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ struct DisasContext {
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif

/* Target-specific return values from translate_one, indicating the
--
2.34.1
New patch
Mark all memory operations that are not already marked with UNALIGN.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 36 ++++++++++++++++++++----------------
1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LESL | MO_ALIGN);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
+ MO_LEUQ | MO_ALIGN);
break;
}
break;
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x1:
/* Quadword physical access */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x2:
/* Longword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LESL);
+ MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
break;
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEUQ);
+ MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
break;
case 0x4:
/* Longword virtual access */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2A:
/* LDL_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
break;
case 0x2C:
/* STL */
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LESL);
+ ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEUQ);
+ ctx->mem_idx, MO_LEUQ | MO_ALIGN);
break;
case 0x30:
/* BR */
--
2.34.1
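Taken together with the previous patch, the effect is that alignment is now
requested per access rather than implied by the target: in system mode
UNALIGN(ctx) expands to MO_ALIGN, in user-only mode to the per-context
unaligned-trap setting, and accesses outside the UNALIGN scheme (physical
and locked) say MO_ALIGN explicitly. The shape of the two kinds of use
site, as a hedged sketch rather than literal patch content:

    /* Ordinary load: honors the context's unaligned setting. */
    tcg_gen_qemu_ld_i64(va, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));

    /* Locked load (LDQ_L): always strictly aligned. */
    tcg_gen_qemu_ld_i64(va, addr, ctx->mem_idx, MO_LEUQ | MO_ALIGN);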
New patch
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configs/targets/alpha-linux-user.mak | 1 -
configs/targets/alpha-softmmu.mak | 1 -
2 files changed, 2 deletions(-)

diff --git a/configs/targets/alpha-linux-user.mak b/configs/targets/alpha-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/alpha-linux-user.mak
+++ b/configs/targets/alpha-linux-user.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=alpha
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
diff --git a/configs/targets/alpha-softmmu.mak b/configs/targets/alpha-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/alpha-softmmu.mak
+++ b/configs/targets/alpha-softmmu.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=alpha
-TARGET_ALIGNED_ONLY=y
TARGET_SUPPORTS_MTTCG=y
--
2.34.1
New patch
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hppa/translate.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
-#define UNALIGN(C) 0
+#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
--
2.34.1
New patch
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configs/targets/hppa-linux-user.mak | 1 -
configs/targets/hppa-softmmu.mak | 1 -
2 files changed, 2 deletions(-)

diff --git a/configs/targets/hppa-linux-user.mak b/configs/targets/hppa-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/hppa-linux-user.mak
+++ b/configs/targets/hppa-linux-user.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=hppa
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/hppa-softmmu.mak b/configs/targets/hppa-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/hppa-softmmu.mak
+++ b/configs/targets/hppa-softmmu.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=hppa
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
TARGET_SUPPORTS_MTTCG=y
--
2.34.1
New patch
Acked-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/translate.c | 66 +++++++++++++++++++++-------------------
1 file changed, 34 insertions(+), 32 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
TCGv addr, int mmu_idx, MemOp memop)
{
gen_address_mask(dc, addr);
- tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+ tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
@@ -XXX,XX +XXX,XX @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
break;
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
default:
{
TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(memop);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

save_state(dc);
#ifdef TARGET_SPARC64
@@ -XXX,XX +XXX,XX @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
/* fall through */
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
case GET_ASI_BCOPY:
@@ -XXX,XX +XXX,XX @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
default:
{
TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

save_state(dc);
#ifdef TARGET_SPARC64
@@ -XXX,XX +XXX,XX @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
case GET_ASI_DIRECT:
oldv = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop);
+ da.mem_idx, da.memop | MO_ALIGN);
gen_store_gpr(dc, rd, oldv);
break;
default:
@@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
switch (size) {
case 4:
d32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
gen_store_fpr_F(dc, rd, d32);
break;
case 8:
@@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
/* Valid for lddfa only. */
if (size == 8) {
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
@@ -XXX,XX +XXX,XX @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
default:
{
TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+ TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);

save_state(dc);
/* According to the table in the UA2011 manual, the only
@@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
switch (size) {
case 4:
d32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
case 8:
tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
@@ -XXX,XX +XXX,XX @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
/* Valid for stdfa only. */
if (size == 8) {
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
@@ -XXX,XX +XXX,XX @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
TCGv_i64 tmp = tcg_temp_new_i64();

gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);

/* Note that LE ldda acts as if each 32-bit register
result is byte swapped. Having just performed one
@@ -XXX,XX +XXX,XX @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
tcg_gen_concat32_i64(t64, hi, lo);
}
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
}
break;

@@ -XXX,XX +XXX,XX @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
case GET_ASI_DIRECT:
oldv = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
- da.mem_idx, da.memop);
+ da.mem_idx, da.memop | MO_ALIGN);
gen_store_gpr(dc, rd, oldv);
break;
default:
@@ -XXX,XX +XXX,XX @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
return;
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
default:
{
@@ -XXX,XX +XXX,XX @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
break;
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
break;
case GET_ASI_BFILL:
/* Store 32 bytes of T64 to ADDR. */
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x0: /* ld, V9 lduw, load unsigned word */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
break;
case 0x1: /* ldub, load unsigned byte */
gen_address_mask(dc, cpu_addr);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x2: /* lduh, load unsigned halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUW);
+ dc->mem_idx, MO_TEUW | MO_ALIGN);
break;
case 0x3: /* ldd, load double word */
if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
tcg_gen_trunc_i64_tl(cpu_val, t64);
tcg_gen_ext32u_tl(cpu_val, cpu_val);
gen_store_gpr(dc, rd + 1, cpu_val);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0xa: /* ldsh, load signed halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TESW);
+ dc->mem_idx, MO_TESW | MO_ALIGN);
break;
case 0xd: /* ldstub */
gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x08: /* V9 ldsw */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TESL);
+ dc->mem_idx, MO_TESL | MO_ALIGN);
break;
case 0x0b: /* V9 ldx */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
break;
case 0x18: /* V9 ldswa */
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
cpu_dst_32 = gen_dest_fpr_F(dc);
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x21: /* ldfsr, V9 ldxfsr */
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
if (rd == 1) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
break;
}
#endif
cpu_dst_32 = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
break;
case 0x22: /* ldqf, load quad fpreg */
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x4: /* st, store word */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
break;
case 0x5: /* stb, store byte */
gen_address_mask(dc, cpu_addr);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x6: /* sth, store halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUW);
+ dc->mem_idx, MO_TEUW | MO_ALIGN);
break;
case 0x7: /* std, store double word */
if (rd & 1)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
t64 = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t64, lo, cpu_val);
tcg_gen_qemu_st_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x0e: /* V9 stx */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
break;
case 0x1e: /* V9 stxa */
gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
cpu_src1_32 = gen_load_fpr_F(dc, rd);
tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
break;
case 0x25: /* stfsr, V9 stxfsr */
{
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
- dc->mem_idx, MO_TEUQ);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN);
break;
}
#endif
tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
- dc->mem_idx, MO_TEUL);
+ dc->mem_idx, MO_TEUL | MO_ALIGN);
}
break;
case 0x26:
--
2.34.1
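Since MemOp is a bit-set, OR-ing MO_ALIGN into da.memop preserves the size
and endianness bits while adding the alignment requirement; note in
particular the gen_st_asi hunk, where the old code masked with MO_SIZE
(which also dropped the endianness bits) and the new code passes the full
memop plus MO_ALIGN. Illustrative only:

    MemOp mop = MO_TEUL;      /* 32-bit, target endian */
    mop |= MO_ALIGN;          /* same access, now alignment-checked */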
New patch
This passes the memop given as an argument to helper_ld_asi
on to the ultimate load primitive.

Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/sparc/ldst_helper.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
uint32_t last_addr = addr;
#endif
+ MemOpIdx oi;

do_check_align(env, addr, size - 1, GETPC());
switch (asi) {
@@ -XXX,XX +XXX,XX @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
break;
case ASI_KERNELTXT: /* Supervisor code access */
+ oi = make_memop_idx(memop, cpu_mmu_index(env, true));
switch (size) {
case 1:
- ret = cpu_ldub_code(env, addr);
+ ret = cpu_ldb_code_mmu(env, addr, oi, GETPC());
break;
case 2:
- ret = cpu_lduw_code(env, addr);
+ ret = cpu_ldw_code_mmu(env, addr, oi, GETPC());
break;
default:
case 4:
- ret = cpu_ldl_code(env, addr);
+ ret = cpu_ldl_code_mmu(env, addr, oi, GETPC());
break;
case 8:
- ret = cpu_ldq_code(env, addr);
+ ret = cpu_ldq_code_mmu(env, addr, oi, GETPC());
break;
}
break;
--
2.34.1
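For context, a MemOpIdx packs the MemOp together with the MMU index, and
cpu_mmu_index(env, true) selects the instruction-fetch index used above.
A hedged sketch of the pattern, not literal patch content:

    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
    uint32_t insn = cpu_ldl_code_mmu(env, addr, oi, GETPC());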
New patch
Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
configs/targets/sparc-linux-user.mak | 1 -
configs/targets/sparc-softmmu.mak | 1 -
configs/targets/sparc32plus-linux-user.mak | 1 -
configs/targets/sparc64-linux-user.mak | 1 -
configs/targets/sparc64-softmmu.mak | 1 -
5 files changed, 5 deletions(-)

diff --git a/configs/targets/sparc-linux-user.mak b/configs/targets/sparc-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc-linux-user.mak
+++ b/configs/targets/sparc-linux-user.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=sparc
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc-softmmu.mak b/configs/targets/sparc-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc-softmmu.mak
+++ b/configs/targets/sparc-softmmu.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc32plus-linux-user.mak b/configs/targets/sparc32plus-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc32plus-linux-user.mak
+++ b/configs/targets/sparc32plus-linux-user.mak
@@ -XXX,XX +XXX,XX @@ TARGET_BASE_ARCH=sparc
TARGET_ABI_DIR=sparc
TARGET_SYSTBL_ABI=common,32
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc64-linux-user.mak b/configs/targets/sparc64-linux-user.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc64-linux-user.mak
+++ b/configs/targets/sparc64-linux-user.mak
@@ -XXX,XX +XXX,XX @@ TARGET_BASE_ARCH=sparc
TARGET_ABI_DIR=sparc
TARGET_SYSTBL_ABI=common,64
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
diff --git a/configs/targets/sparc64-softmmu.mak b/configs/targets/sparc64-softmmu.mak
index XXXXXXX..XXXXXXX 100644
--- a/configs/targets/sparc64-softmmu.mak
+++ b/configs/targets/sparc64-softmmu.mak
@@ -XXX,XX +XXX,XX @@
TARGET_ARCH=sparc64
TARGET_BASE_ARCH=sparc
-TARGET_ALIGNED_ONLY=y
TARGET_BIG_ENDIAN=y
--
2.34.1
This function has two users, who use it incompatibly.
In tlb_flush_page_by_mmuidx_async_0, when flushing a
single page, we need to flush exactly two pages.
In tlb_flush_range_by_mmuidx_async_0, when flushing a
range of pages, we need to flush N+1 pages.

This avoids double-flushing of jmp cache pages in a range.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
}
}
}

-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
-{
- /* Discard jump cache entries for any tb which might potentially
- overlap the flushed page. */
- tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
- tb_jmp_cache_clear_page(cpu, addr);
-}
-
/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
* @desc: The CPUTLBDesc portion of the TLB
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
}
qemu_spin_unlock(&env_tlb(env)->c.lock);

- tb_flush_jmp_cache(cpu, addr);
+ /*
+ * Discard jump cache entries for any tb which might potentially
+ * overlap the flushed page, which includes the previous.
+ */
+ tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+ tb_jmp_cache_clear_page(cpu, addr);
}

/**
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
return;
}

- for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
- tb_flush_jmp_cache(cpu, d.addr + i);
+ /*
+ * Discard jump cache entries for any tb which might potentially
+ * overlap the flushed pages, which includes the previous.
+ */
+ d.addr -= TARGET_PAGE_SIZE;
+ for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+ tb_jmp_cache_clear_page(cpu, d.addr);
+ d.addr += TARGET_PAGE_SIZE;
}
}
}
--
2.34.1
Interpret the variable argument placement in the caller. Pass data_type
instead of is64 -- there are several places where we already convert back
from bool to type. Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/i386/tcg-target.c.inc | 111 +++++++++++++++++---------------------
1 file changed, 50 insertions(+), 61 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
* Record the context of a call to the out of line helper code for the slow path
* for a load or store, so that we can later generate the correct helper code
*/
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
- MemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+ TCGType type, MemOpIdx oi,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
tcg_insn_unit *raddr,
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,

label->is_ld = is_ld;
label->oi = oi;
- label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ label->type = type;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
- int seg, bool is64, MemOp memop)
+ int seg, TCGType type, MemOp memop)
{
- TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
bool use_movbe = false;
- int rexw = is64 * P_REXW;
+ int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
int movop = OPC_MOVL_GvEv;

/* Do big-endian loads with movbe. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
}

-/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
- EAX. It will be useful once fixed registers globals are less
- common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg datalo, datahi, addrlo;
- TCGReg addrhi __attribute__((unused));
- MemOpIdx oi;
- MemOp opc;
+ MemOp opc = get_memop(oi);
+
#if defined(CONFIG_SOFTMMU)
- int mem_index;
tcg_insn_unit *label_ptr[2];
-#else
- unsigned a_bits;
-#endif

- datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
- mem_index = get_mmuidx(oi);
-
- tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+ tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
label_ptr, offsetof(CPUTLBEntry, addr_read));

/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1,
+ -1, 0, 0, data_type, opc);

/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
+ addrlo, addrhi, s->code_ptr, label_ptr);
#else
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
}

tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg,
- is64, opc);
+ data_type, opc);
#endif
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
}

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg datalo, datahi, addrlo;
- TCGReg addrhi __attribute__((unused));
- MemOpIdx oi;
- MemOp opc;
+ MemOp opc = get_memop(oi);
+
#if defined(CONFIG_SOFTMMU)
- int mem_index;
tcg_insn_unit *label_ptr[2];
-#else
- unsigned a_bits;
-#endif

- datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
- mem_index = get_mmuidx(oi);
-
- tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+ tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
label_ptr, offsetof(CPUTLBEntry, addr_write));

/* TLB Hit. */
tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);

/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
+ addrlo, addrhi, s->code_ptr, label_ptr);
#else
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
if (a_bits) {
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
}
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, 0);
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+ tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+ } else {
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+ }
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, 1);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+ } else if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+ } else {
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+ }
break;
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
- tcg_out_qemu_st(s, args, 0);
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+ tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+ } else {
+ tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+ }
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, 1);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+ } else if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+ } else {
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+ }
break;

OP_32_64(mulu2):
--
2.34.1
diff view generated by jsdifflib
New patch
1
Test for both base and index; use datahi as a temporary, overwritten
2
by the final load. Always perform the loads in ascending order, so
3
that any (user-only) fault sees the correct address.
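
As an illustration, a minimal sketch of the rule being applied; the
emit_*() helpers are hypothetical stand-ins for the
tcg_out_modrm_sib_offset() calls in the diff below:

    /* 64-bit load split into two 32-bit halves on a 32-bit host. */
    if (base == datalo || index == datalo) {
        /* Address overlaps the first destination: materialize the
           address into datahi, which the final load overwrites. */
        emit_lea(datahi, base, index, ofs);   /* datahi = &mem */
        emit_ld32(datalo, datahi, 0);         /* low half first */
        emit_ld32(datahi, datahi, 4);         /* high half last */
    } else {
        /* Ascending order, so a user-only fault reports the
           address of the start of the access. */
        emit_ld32_sib(datalo, base, index, ofs);
        emit_ld32_sib(datahi, base, index, ofs + 4);
    }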
1
4
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/i386/tcg-target.c.inc | 31 +++++++++++++++----------------
8
1 file changed, 15 insertions(+), 16 deletions(-)
9
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
13
+++ b/tcg/i386/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
15
if (TCG_TARGET_REG_BITS == 64) {
16
tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
17
base, index, 0, ofs);
18
+ break;
19
+ }
20
+ if (use_movbe) {
21
+ TCGReg t = datalo;
22
+ datalo = datahi;
23
+ datahi = t;
24
+ }
25
+ if (base == datalo || index == datalo) {
26
+ tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, base, index, 0, ofs);
27
+ tcg_out_modrm_offset(s, movop + seg, datalo, datahi, 0);
28
+ tcg_out_modrm_offset(s, movop + seg, datahi, datahi, 4);
29
} else {
30
- if (use_movbe) {
31
- TCGReg t = datalo;
32
- datalo = datahi;
33
- datahi = t;
34
- }
35
- if (base != datalo) {
36
- tcg_out_modrm_sib_offset(s, movop + seg, datalo,
37
- base, index, 0, ofs);
38
- tcg_out_modrm_sib_offset(s, movop + seg, datahi,
39
- base, index, 0, ofs + 4);
40
- } else {
41
- tcg_out_modrm_sib_offset(s, movop + seg, datahi,
42
- base, index, 0, ofs + 4);
43
- tcg_out_modrm_sib_offset(s, movop + seg, datalo,
44
- base, index, 0, ofs);
45
- }
46
+ tcg_out_modrm_sib_offset(s, movop + seg, datalo,
47
+ base, index, 0, ofs);
48
+ tcg_out_modrm_sib_offset(s, movop + seg, datahi,
49
+ base, index, 0, ofs + 4);
50
}
51
break;
52
default:
53
--
54
2.34.1
diff view generated by jsdifflib
New patch
1
Collect the 4 potential parts of the host address into a struct.
2
Reorg tcg_out_qemu_{ld,st}_direct to use it.
3
Reorg guest_base handling to use it.
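
A sketch of the intended usage, assuming the struct as defined in the
diff below; the two initializations shown are the cases the patch
converts:

    typedef struct {
        TCGReg base;    /* base register */
        int index;      /* index register, or -1 for none */
        int ofs;        /* constant displacement */
        int seg;        /* segment override prefix, or 0 */
    } HostAddress;

    /* Softmmu TLB hit: host address already computed into TCG_REG_L1. */
    HostAddress h = { .base = TCG_REG_L1, .index = -1, .ofs = 0, .seg = 0 };

    /* User-only: the guest_base parts are preset once in the prologue. */
    h = x86_guest_base;
    h.base = addrlo;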
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/i386/tcg-target.c.inc | 165 +++++++++++++++++++++-----------------
9
1 file changed, 90 insertions(+), 75 deletions(-)
10
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.c.inc
14
+++ b/tcg/i386/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
16
tcg_out8(s, 0x90);
17
}
18
19
+typedef struct {
20
+ TCGReg base;
21
+ int index;
22
+ int ofs;
23
+ int seg;
24
+} HostAddress;
25
+
26
#if defined(CONFIG_SOFTMMU)
27
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
28
* int mmu_idx, uintptr_t ra)
29
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
30
return tcg_out_fail_alignment(s, l);
31
}
32
33
-#if TCG_TARGET_REG_BITS == 32
34
-# define x86_guest_base_seg 0
35
-# define x86_guest_base_index -1
36
-# define x86_guest_base_offset guest_base
37
-#else
38
-static int x86_guest_base_seg;
39
-static int x86_guest_base_index = -1;
40
-static int32_t x86_guest_base_offset;
41
-# if defined(__x86_64__) && defined(__linux__)
42
-# include <asm/prctl.h>
43
-# include <sys/prctl.h>
44
+static HostAddress x86_guest_base = {
45
+ .index = -1
46
+};
47
+
48
+#if defined(__x86_64__) && defined(__linux__)
49
+# include <asm/prctl.h>
50
+# include <sys/prctl.h>
51
int arch_prctl(int code, unsigned long addr);
52
static inline int setup_guest_base_seg(void)
53
{
54
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
55
}
56
return 0;
57
}
58
-# elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
59
-# include <machine/sysarch.h>
60
+#elif defined(__x86_64__) && \
61
+ (defined (__FreeBSD__) || defined (__FreeBSD_kernel__))
62
+# include <machine/sysarch.h>
63
static inline int setup_guest_base_seg(void)
64
{
65
if (sysarch(AMD64_SET_GSBASE, &guest_base) == 0) {
66
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
67
}
68
return 0;
69
}
70
-# else
71
+#else
72
static inline int setup_guest_base_seg(void)
73
{
74
return 0;
75
}
76
-# endif
77
-#endif
78
+#endif /* setup_guest_base_seg */
79
#endif /* SOFTMMU */
80
81
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
82
- TCGReg base, int index, intptr_t ofs,
83
- int seg, TCGType type, MemOp memop)
84
+ HostAddress h, TCGType type, MemOp memop)
85
{
86
bool use_movbe = false;
87
int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
88
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
89
90
switch (memop & MO_SSIZE) {
91
case MO_UB:
92
- tcg_out_modrm_sib_offset(s, OPC_MOVZBL + seg, datalo,
93
- base, index, 0, ofs);
94
+ tcg_out_modrm_sib_offset(s, OPC_MOVZBL + h.seg, datalo,
95
+ h.base, h.index, 0, h.ofs);
96
break;
97
case MO_SB:
98
- tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + seg, datalo,
99
- base, index, 0, ofs);
100
+ tcg_out_modrm_sib_offset(s, OPC_MOVSBL + rexw + h.seg, datalo,
101
+ h.base, h.index, 0, h.ofs);
102
break;
103
case MO_UW:
104
if (use_movbe) {
105
/* There is no extending movbe; only low 16-bits are modified. */
106
- if (datalo != base && datalo != index) {
107
+ if (datalo != h.base && datalo != h.index) {
108
/* XOR breaks dependency chains. */
109
tgen_arithr(s, ARITH_XOR, datalo, datalo);
110
- tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
111
- datalo, base, index, 0, ofs);
112
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
113
+ datalo, h.base, h.index, 0, h.ofs);
114
} else {
115
- tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
116
- datalo, base, index, 0, ofs);
117
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
118
+ datalo, h.base, h.index, 0, h.ofs);
119
tcg_out_ext16u(s, datalo, datalo);
120
}
121
} else {
122
- tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
123
- base, index, 0, ofs);
124
+ tcg_out_modrm_sib_offset(s, OPC_MOVZWL + h.seg, datalo,
125
+ h.base, h.index, 0, h.ofs);
126
}
127
break;
128
case MO_SW:
129
if (use_movbe) {
130
- tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
131
- datalo, base, index, 0, ofs);
132
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + h.seg,
133
+ datalo, h.base, h.index, 0, h.ofs);
134
tcg_out_ext16s(s, type, datalo, datalo);
135
} else {
136
- tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
137
- datalo, base, index, 0, ofs);
138
+ tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + h.seg,
139
+ datalo, h.base, h.index, 0, h.ofs);
140
}
141
break;
142
case MO_UL:
143
- tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
144
+ tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
145
+ h.base, h.index, 0, h.ofs);
146
break;
147
#if TCG_TARGET_REG_BITS == 64
148
case MO_SL:
149
if (use_movbe) {
150
- tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo,
151
- base, index, 0, ofs);
152
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + h.seg, datalo,
153
+ h.base, h.index, 0, h.ofs);
154
tcg_out_ext32s(s, datalo, datalo);
155
} else {
156
- tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
157
- base, index, 0, ofs);
158
+ tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + h.seg, datalo,
159
+ h.base, h.index, 0, h.ofs);
160
}
161
break;
162
#endif
163
case MO_UQ:
164
if (TCG_TARGET_REG_BITS == 64) {
165
- tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
166
- base, index, 0, ofs);
167
+ tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
168
+ h.base, h.index, 0, h.ofs);
169
break;
170
}
171
if (use_movbe) {
172
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
173
datalo = datahi;
174
datahi = t;
175
}
176
- if (base == datalo || index == datalo) {
177
- tcg_out_modrm_sib_offset(s, OPC_LEA, datahi, base, index, 0, ofs);
178
- tcg_out_modrm_offset(s, movop + seg, datalo, datahi, 0);
179
- tcg_out_modrm_offset(s, movop + seg, datahi, datahi, 4);
180
+ if (h.base == datalo || h.index == datalo) {
181
+ tcg_out_modrm_sib_offset(s, OPC_LEA, datahi,
182
+ h.base, h.index, 0, h.ofs);
183
+ tcg_out_modrm_offset(s, movop + h.seg, datalo, datahi, 0);
184
+ tcg_out_modrm_offset(s, movop + h.seg, datahi, datahi, 4);
185
} else {
186
- tcg_out_modrm_sib_offset(s, movop + seg, datalo,
187
- base, index, 0, ofs);
188
- tcg_out_modrm_sib_offset(s, movop + seg, datahi,
189
- base, index, 0, ofs + 4);
190
+ tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
191
+ h.base, h.index, 0, h.ofs);
192
+ tcg_out_modrm_sib_offset(s, movop + h.seg, datahi,
193
+ h.base, h.index, 0, h.ofs + 4);
194
}
195
break;
196
default:
197
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
198
MemOpIdx oi, TCGType data_type)
199
{
200
MemOp opc = get_memop(oi);
201
+ HostAddress h;
202
203
#if defined(CONFIG_SOFTMMU)
204
tcg_insn_unit *label_ptr[2];
205
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
206
label_ptr, offsetof(CPUTLBEntry, addr_read));
207
208
/* TLB Hit. */
209
- tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1,
210
- -1, 0, 0, data_type, opc);
211
+ h.base = TCG_REG_L1;
212
+ h.index = -1;
213
+ h.ofs = 0;
214
+ h.seg = 0;
215
+ tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc);
216
217
/* Record the current context of a load into ldst label */
218
add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
219
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
220
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
221
}
222
223
- tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
224
- x86_guest_base_offset, x86_guest_base_seg,
225
- data_type, opc);
226
+ h = x86_guest_base;
227
+ h.base = addrlo;
228
+ tcg_out_qemu_ld_direct(s, datalo, datahi, h, data_type, opc);
229
#endif
230
}
231
232
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
233
- TCGReg base, int index, intptr_t ofs,
234
- int seg, MemOp memop)
235
+ HostAddress h, MemOp memop)
236
{
237
bool use_movbe = false;
238
int movop = OPC_MOVL_EvGv;
239
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
240
case MO_8:
241
/* This is handled with constraints on INDEX_op_qemu_st8_i32. */
242
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
243
- tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
244
- datalo, base, index, 0, ofs);
245
+ tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + h.seg,
246
+ datalo, h.base, h.index, 0, h.ofs);
247
break;
248
case MO_16:
249
- tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
250
- base, index, 0, ofs);
251
+ tcg_out_modrm_sib_offset(s, movop + P_DATA16 + h.seg, datalo,
252
+ h.base, h.index, 0, h.ofs);
253
break;
254
case MO_32:
255
- tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
256
+ tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
257
+ h.base, h.index, 0, h.ofs);
258
break;
259
case MO_64:
260
if (TCG_TARGET_REG_BITS == 64) {
261
- tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
262
- base, index, 0, ofs);
263
+ tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
264
+ h.base, h.index, 0, h.ofs);
265
} else {
266
if (use_movbe) {
267
TCGReg t = datalo;
268
datalo = datahi;
269
datahi = t;
270
}
271
- tcg_out_modrm_sib_offset(s, movop + seg, datalo,
272
- base, index, 0, ofs);
273
- tcg_out_modrm_sib_offset(s, movop + seg, datahi,
274
- base, index, 0, ofs + 4);
275
+ tcg_out_modrm_sib_offset(s, movop + h.seg, datalo,
276
+ h.base, h.index, 0, h.ofs);
277
+ tcg_out_modrm_sib_offset(s, movop + h.seg, datahi,
278
+ h.base, h.index, 0, h.ofs + 4);
279
}
280
break;
281
default:
282
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
283
MemOpIdx oi, TCGType data_type)
284
{
285
MemOp opc = get_memop(oi);
286
+ HostAddress h;
287
288
#if defined(CONFIG_SOFTMMU)
289
tcg_insn_unit *label_ptr[2];
290
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
291
label_ptr, offsetof(CPUTLBEntry, addr_write));
292
293
/* TLB Hit. */
294
- tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
295
+ h.base = TCG_REG_L1;
296
+ h.index = -1;
297
+ h.ofs = 0;
298
+ h.seg = 0;
299
+ tcg_out_qemu_st_direct(s, datalo, datahi, h, opc);
300
301
/* Record the current context of a store into ldst label */
302
add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
303
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
304
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
305
}
306
307
- tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
308
- x86_guest_base_offset, x86_guest_base_seg, opc);
309
+ h = x86_guest_base;
310
+ h.base = addrlo;
311
+
312
+ tcg_out_qemu_st_direct(s, datalo, datahi, h, opc);
313
#endif
314
}
315
316
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
317
(ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
318
+ stack_addend);
319
#else
320
-# if !defined(CONFIG_SOFTMMU) && TCG_TARGET_REG_BITS == 64
321
+# if !defined(CONFIG_SOFTMMU)
322
if (guest_base) {
323
int seg = setup_guest_base_seg();
324
if (seg != 0) {
325
- x86_guest_base_seg = seg;
326
+ x86_guest_base.seg = seg;
327
} else if (guest_base == (int32_t)guest_base) {
328
- x86_guest_base_offset = guest_base;
329
+ x86_guest_base.ofs = guest_base;
330
} else {
331
/* Choose R12 because, as a base, it requires a SIB byte. */
332
- x86_guest_base_index = TCG_REG_R12;
333
- tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base_index, guest_base);
334
- tcg_regset_set_reg(s->reserved_regs, x86_guest_base_index);
335
+ x86_guest_base.index = TCG_REG_R12;
336
+ tcg_out_movi(s, TCG_TYPE_PTR, x86_guest_base.index, guest_base);
337
+ tcg_regset_set_reg(s->reserved_regs, x86_guest_base.index);
338
}
339
}
340
# endif
341
--
342
2.34.1
343
344
diff view generated by jsdifflib
New patch
1
Use TCG_REG_L[01] constants directly.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++----------------
7
1 file changed, 16 insertions(+), 16 deletions(-)
8
9
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/i386/tcg-target.c.inc
12
+++ b/tcg/i386/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
14
int mem_index, MemOp opc,
15
tcg_insn_unit **label_ptr, int which)
16
{
17
- const TCGReg r0 = TCG_REG_L0;
18
- const TCGReg r1 = TCG_REG_L1;
19
TCGType ttype = TCG_TYPE_I32;
20
TCGType tlbtype = TCG_TYPE_I32;
21
int trexw = 0, hrexw = 0, tlbrexw = 0;
22
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
23
}
24
}
25
26
- tcg_out_mov(s, tlbtype, r0, addrlo);
27
- tcg_out_shifti(s, SHIFT_SHR + tlbrexw, r0,
28
+ tcg_out_mov(s, tlbtype, TCG_REG_L0, addrlo);
29
+ tcg_out_shifti(s, SHIFT_SHR + tlbrexw, TCG_REG_L0,
30
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
31
32
- tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0,
33
+ tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
34
TLB_MASK_TABLE_OFS(mem_index) +
35
offsetof(CPUTLBDescFast, mask));
36
37
- tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0,
38
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
39
TLB_MASK_TABLE_OFS(mem_index) +
40
offsetof(CPUTLBDescFast, table));
41
42
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
43
copy the address and mask. For lesser alignments, check that we don't
44
cross pages for the complete access. */
45
if (a_bits >= s_bits) {
46
- tcg_out_mov(s, ttype, r1, addrlo);
47
+ tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
48
} else {
49
- tcg_out_modrm_offset(s, OPC_LEA + trexw, r1, addrlo, s_mask - a_mask);
50
+ tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
51
+ addrlo, s_mask - a_mask);
52
}
53
tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
54
- tgen_arithi(s, ARITH_AND + trexw, r1, tlb_mask, 0);
55
+ tgen_arithi(s, ARITH_AND + trexw, TCG_REG_L1, tlb_mask, 0);
56
57
- /* cmp 0(r0), r1 */
58
- tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, which);
59
+ /* cmp 0(TCG_REG_L0), TCG_REG_L1 */
60
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw,
61
+ TCG_REG_L1, TCG_REG_L0, which);
62
63
/* Prepare for both the fast path add of the tlb addend, and the slow
64
path function argument setup. */
65
- tcg_out_mov(s, ttype, r1, addrlo);
66
+ tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
67
68
/* jne slow_path */
69
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
70
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
71
s->code_ptr += 4;
72
73
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
74
- /* cmp 4(r0), addrhi */
75
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, which + 4);
76
+ /* cmp 4(TCG_REG_L0), addrhi */
77
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, TCG_REG_L0, which + 4);
78
79
/* jne slow_path */
80
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
81
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
82
83
/* TLB Hit. */
84
85
- /* add addend(r0), r1 */
86
- tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
87
+ /* add addend(TCG_REG_L0), TCG_REG_L1 */
88
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L1, TCG_REG_L0,
89
offsetof(CPUTLBEntry, addend));
90
}
91
92
--
93
2.34.1
94
95
diff view generated by jsdifflib
1
Prepare for targets to be able to produce TBs that can
1
Split out a helper for choosing testb vs testl.
2
run in more than one virtual context.
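
The ordering constraint this introduces is worth a sketch (illustration
only, mirroring the tb_jmp_cache_set/get helpers in the diff below): pc
is published before tb with release semantics, so any reader that
acquires the new tb also sees the matching pc.

    /* Publish: write pc first, then release-store tb. */
    jc->array[hash].pc = pc;
    qatomic_store_release(&jc->array[hash].tb, tb);

    /* Consume: acquire-load tb; the pc read below is then ordered. */
    TranslationBlock *tb = qatomic_load_acquire(&jc->array[hash].tb);
    target_ulong pc = jc->array[hash].pc;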
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/internal.h | 4 +++
6
tcg/i386/tcg-target.c.inc | 30 ++++++++++++++++++------------
8
accel/tcg/tb-jmp-cache.h | 41 +++++++++++++++++++++++++
7
1 file changed, 18 insertions(+), 12 deletions(-)
9
include/exec/cpu-defs.h | 3 ++
10
include/exec/exec-all.h | 32 ++++++++++++++++++--
11
accel/tcg/cpu-exec.c | 16 ++++++----
12
accel/tcg/translate-all.c | 64 ++++++++++++++++++++++++++-------------
13
6 files changed, 131 insertions(+), 29 deletions(-)
14
8
15
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
9
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/internal.h
11
--- a/tcg/i386/tcg-target.c.inc
18
+++ b/accel/tcg/internal.h
12
+++ b/tcg/i386/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void);
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
20
/* Return the current PC from CPU, which may be cached in TB. */
14
tcg_out8(s, 0x90);
21
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
22
{
23
+#if TARGET_TB_PCREL
24
+ return cpu->cc->get_pc(cpu);
25
+#else
26
return tb_pc(tb);
27
+#endif
28
}
15
}
29
16
30
#endif /* ACCEL_TCG_INTERNAL_H */
17
+/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */
31
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
18
+static void __attribute__((unused))
32
index XXXXXXX..XXXXXXX 100644
19
+tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i)
33
--- a/accel/tcg/tb-jmp-cache.h
34
+++ b/accel/tcg/tb-jmp-cache.h
35
@@ -XXX,XX +XXX,XX @@
36
37
/*
38
* Accessed in parallel; all accesses to 'tb' must be atomic.
39
+ * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
40
+ * a load_acquire/store_release to 'tb'.
41
*/
42
struct CPUJumpCache {
43
struct {
44
TranslationBlock *tb;
45
+#if TARGET_TB_PCREL
46
+ target_ulong pc;
47
+#endif
48
} array[TB_JMP_CACHE_SIZE];
49
};
50
51
+static inline TranslationBlock *
52
+tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
53
+{
20
+{
54
+#if TARGET_TB_PCREL
55
+ /* Use acquire to ensure current load of pc from jc. */
56
+ return qatomic_load_acquire(&jc->array[hash].tb);
57
+#else
58
+ /* Use rcu_read to ensure current load of pc from *tb. */
59
+ return qatomic_rcu_read(&jc->array[hash].tb);
60
+#endif
61
+}
62
+
63
+static inline target_ulong
64
+tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
65
+{
66
+#if TARGET_TB_PCREL
67
+ return jc->array[hash].pc;
68
+#else
69
+ return tb_pc(tb);
70
+#endif
71
+}
72
+
73
+static inline void
74
+tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
75
+ TranslationBlock *tb, target_ulong pc)
76
+{
77
+#if TARGET_TB_PCREL
78
+ jc->array[hash].pc = pc;
79
+ /* Use store_release on tb to ensure pc is written first. */
80
+ qatomic_store_release(&jc->array[hash].tb, tb);
81
+#else
82
+ /* Use the pc value already stored in tb->pc. */
83
+ qatomic_set(&jc->array[hash].tb, tb);
84
+#endif
85
+}
86
+
87
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
88
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
89
index XXXXXXX..XXXXXXX 100644
90
--- a/include/exec/cpu-defs.h
91
+++ b/include/exec/cpu-defs.h
92
@@ -XXX,XX +XXX,XX @@
93
# error TARGET_PAGE_BITS must be defined in cpu-param.h
94
# endif
95
#endif
96
+#ifndef TARGET_TB_PCREL
97
+# define TARGET_TB_PCREL 0
98
+#endif
99
100
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
101
102
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
103
index XXXXXXX..XXXXXXX 100644
104
--- a/include/exec/exec-all.h
105
+++ b/include/exec/exec-all.h
106
@@ -XXX,XX +XXX,XX @@ struct tb_tc {
107
};
108
109
struct TranslationBlock {
110
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
111
- target_ulong cs_base; /* CS base for this block */
112
+#if !TARGET_TB_PCREL
113
+ /*
21
+ /*
114
+ * Guest PC corresponding to this block. This must be the true
22
+ * This is used for testing alignment, so we can usually use testb.
115
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
23
+ * For i686, we have to use testl for %esi/%edi.
116
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
117
+ * privilege, must store those bits elsewhere.
118
+ *
119
+ * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are
120
+ * written such that the TB is associated only with the physical
121
+ * page and may be run in any virtual address context. In this case,
122
+ * PC must always be taken from ENV in a target-specific manner.
123
+ * Unwind information is taken as offsets from the page, to be
124
+ * deposited into the "current" PC.
125
+ */
24
+ */
126
+ target_ulong pc;
25
+ if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) {
127
+#endif
26
+ tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r);
128
+
27
+ tcg_out8(s, i);
129
+ /*
130
+ * Target-specific data associated with the TranslationBlock, e.g.:
131
+ * x86: the original user, the Code Segment virtual base,
132
+ * arm: an extension of tb->flags,
133
+ * s390x: instruction data for EXECUTE,
134
+ * sparc: the next pc of the instruction queue (for delay slots).
135
+ */
136
+ target_ulong cs_base;
137
+
138
uint32_t flags; /* flags defining in which context the code was generated */
139
uint32_t cflags; /* compile flags */
140
141
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
142
/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
143
static inline target_ulong tb_pc(const TranslationBlock *tb)
144
{
145
+#if TARGET_TB_PCREL
146
+ qemu_build_not_reached();
147
+#else
148
return tb->pc;
149
+#endif
150
}
151
152
/* Hide the qatomic_read to make code a little easier on the eyes */
153
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
154
index XXXXXXX..XXXXXXX 100644
155
--- a/accel/tcg/cpu-exec.c
156
+++ b/accel/tcg/cpu-exec.c
157
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
158
const TranslationBlock *tb = p;
159
const struct tb_desc *desc = d;
160
161
- if (tb_pc(tb) == desc->pc &&
162
+ if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
163
tb->page_addr[0] == desc->page_addr0 &&
164
tb->cs_base == desc->cs_base &&
165
tb->flags == desc->flags &&
166
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
167
return NULL;
168
}
169
desc.page_addr0 = phys_pc;
170
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
171
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
172
+ flags, cflags, *cpu->trace_dstate);
173
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
174
}
175
176
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
177
uint32_t flags, uint32_t cflags)
178
{
179
TranslationBlock *tb;
180
+ CPUJumpCache *jc;
181
uint32_t hash;
182
183
/* we should never be trying to look up an INVALID tb */
184
tcg_debug_assert(!(cflags & CF_INVALID));
185
186
hash = tb_jmp_cache_hash_func(pc);
187
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
188
+ jc = cpu->tb_jmp_cache;
189
+ tb = tb_jmp_cache_get_tb(jc, hash);
190
191
if (likely(tb &&
192
- tb->pc == pc &&
193
+ tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
194
tb->cs_base == cs_base &&
195
tb->flags == flags &&
196
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
197
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
198
if (tb == NULL) {
199
return NULL;
200
}
201
- qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
202
+ tb_jmp_cache_set(jc, hash, tb, pc);
203
return tb;
204
}
205
206
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
207
if (cc->tcg_ops->synchronize_from_tb) {
208
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
209
} else {
210
+ assert(!TARGET_TB_PCREL);
211
assert(cc->set_pc);
212
cc->set_pc(cpu, tb_pc(last_tb));
213
}
214
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
215
* for the fast lookup
216
*/
217
h = tb_jmp_cache_hash_func(pc);
218
- qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
219
+ tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
220
}
221
222
#ifndef CONFIG_USER_ONLY
223
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
224
index XXXXXXX..XXXXXXX 100644
225
--- a/accel/tcg/translate-all.c
226
+++ b/accel/tcg/translate-all.c
227
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
228
229
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
230
if (i == 0) {
231
- prev = (j == 0 ? tb_pc(tb) : 0);
232
+ prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
233
} else {
234
prev = tcg_ctx->gen_insn_data[i - 1][j];
235
}
236
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
237
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
238
uintptr_t searched_pc, bool reset_icount)
239
{
240
- target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
241
+ target_ulong data[TARGET_INSN_START_WORDS];
242
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
243
CPUArchState *env = cpu->env_ptr;
244
const uint8_t *p = tb->tc.ptr + tb->tc.size;
245
@@ -XXX,XX +XXX,XX @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
246
return -1;
247
}
248
249
+ memset(data, 0, sizeof(data));
250
+ if (!TARGET_TB_PCREL) {
251
+ data[0] = tb_pc(tb);
252
+ }
253
+
254
/* Reconstruct the stored insn data while looking for the point at
255
which the end of the insn exceeds the searched_pc. */
256
for (i = 0; i < num_insns; ++i) {
257
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
258
const TranslationBlock *a = ap;
259
const TranslationBlock *b = bp;
260
261
- return tb_pc(a) == tb_pc(b) &&
262
- a->cs_base == b->cs_base &&
263
- a->flags == b->flags &&
264
- (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
265
- a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
266
- a->page_addr[0] == b->page_addr[0] &&
267
- a->page_addr[1] == b->page_addr[1];
268
+ return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
269
+ a->cs_base == b->cs_base &&
270
+ a->flags == b->flags &&
271
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
272
+ a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
273
+ a->page_addr[0] == b->page_addr[0] &&
274
+ a->page_addr[1] == b->page_addr[1]);
275
}
276
277
void tb_htable_init(void)
278
@@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
279
qemu_spin_unlock(&dest->jmp_lock);
280
}
281
282
+static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
283
+{
284
+ CPUState *cpu;
285
+
286
+ if (TARGET_TB_PCREL) {
287
+ /* A TB may be at any virtual address */
288
+ CPU_FOREACH(cpu) {
289
+ tcg_flush_jmp_cache(cpu);
290
+ }
291
+ } else {
28
+ } else {
292
+ uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
29
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r);
293
+
30
+ tcg_out32(s, i);
294
+ CPU_FOREACH(cpu) {
295
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
296
+
297
+ if (qatomic_read(&jc->array[h].tb) == tb) {
298
+ qatomic_set(&jc->array[h].tb, NULL);
299
+ }
300
+ }
301
+ }
31
+ }
302
+}
32
+}
303
+
33
+
304
/*
34
typedef struct {
305
* In user-mode, call with mmap_lock held.
35
TCGReg base;
306
* In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
36
int index;
307
@@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
37
@@ -XXX,XX +XXX,XX @@ static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
308
*/
38
unsigned a_mask = (1 << a_bits) - 1;
309
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
39
TCGLabelQemuLdst *label;
310
{
40
311
- CPUState *cpu;
41
- /*
312
PageDesc *p;
42
- * We are expecting a_bits to max out at 7, so we can usually use testb.
313
uint32_t h;
43
- * For i686, we have to use testl for %esi/%edi.
314
tb_page_addr_t phys_pc;
44
- */
315
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
45
- if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
316
46
- tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
317
/* remove the TB from the hash list */
47
- tcg_out8(s, a_mask);
318
phys_pc = tb->page_addr[0];
48
- } else {
319
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
49
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
320
- tb->trace_vcpu_dstate);
50
- tcg_out32(s, a_mask);
321
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
322
+ tb->flags, orig_cflags, tb->trace_vcpu_dstate);
323
if (!qht_remove(&tb_ctx.htable, tb, h)) {
324
return;
325
}
326
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
327
}
328
329
/* remove the TB from the hash list */
330
- h = tb_jmp_cache_hash_func(tb->pc);
331
- CPU_FOREACH(cpu) {
332
- CPUJumpCache *jc = cpu->tb_jmp_cache;
333
- if (qatomic_read(&jc->array[h].tb) == tb) {
334
- qatomic_set(&jc->array[h].tb, NULL);
335
- }
336
- }
51
- }
337
+ tb_jmp_cache_inval_tb(tb);
52
-
338
53
+ tcg_out_testi(s, addrlo, a_mask);
339
/* suppress this TB from the two jump lists */
54
/* jne slow_path */
340
tb_remove_from_jmp_list(tb, 0);
55
tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
341
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
56
342
}
343
344
/* add in the hash table */
345
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
346
- tb->trace_vcpu_dstate);
347
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
348
+ tb->flags, tb->cflags, tb->trace_vcpu_dstate);
349
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
350
351
/* remove TB from the page(s) if we couldn't insert it */
352
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
353
354
gen_code_buf = tcg_ctx->code_gen_ptr;
355
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
356
+#if !TARGET_TB_PCREL
357
tb->pc = pc;
358
+#endif
359
tb->cs_base = cs_base;
360
tb->flags = flags;
361
tb->cflags = cflags;
362
--
57
--
363
2.34.1
58
2.34.1
364
59
365
60
diff view generated by jsdifflib
New patch
1
Rename the 'ext' parameter to 'data_type' to make its use clearer;
2
pass it to tcg_out_qemu_st as well to even out the interfaces.
3
Rename the 'otype' local to 'addr_type' to make its use clearer.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/aarch64/tcg-target.c.inc | 36 +++++++++++++++++-------------------
9
1 file changed, 17 insertions(+), 19 deletions(-)
10
11
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/aarch64/tcg-target.c.inc
14
+++ b/tcg/aarch64/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
16
}
17
18
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
19
- MemOpIdx oi, TCGType ext)
20
+ MemOpIdx oi, TCGType data_type)
21
{
22
MemOp memop = get_memop(oi);
23
- const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
24
+ TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
25
26
/* Byte swapping is left to middle-end expansion. */
27
tcg_debug_assert((memop & MO_BSWAP) == 0);
28
29
#ifdef CONFIG_SOFTMMU
30
- unsigned mem_index = get_mmuidx(oi);
31
tcg_insn_unit *label_ptr;
32
33
- tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
34
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
35
- TCG_REG_X1, otype, addr_reg);
36
- add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
37
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1);
38
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
39
+ TCG_REG_X1, addr_type, addr_reg);
40
+ add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
41
s->code_ptr, label_ptr);
42
#else /* !CONFIG_SOFTMMU */
43
unsigned a_bits = get_alignment_bits(memop);
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
45
tcg_out_test_alignment(s, true, addr_reg, a_bits);
46
}
47
if (USE_GUEST_BASE) {
48
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
49
- TCG_REG_GUEST_BASE, otype, addr_reg);
50
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
51
+ TCG_REG_GUEST_BASE, addr_type, addr_reg);
52
} else {
53
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
54
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
55
addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
56
}
57
#endif /* CONFIG_SOFTMMU */
58
}
59
60
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
61
- MemOpIdx oi)
62
+ MemOpIdx oi, TCGType data_type)
63
{
64
MemOp memop = get_memop(oi);
65
- const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
66
+ TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
67
68
/* Byte swapping is left to middle-end expansion. */
69
tcg_debug_assert((memop & MO_BSWAP) == 0);
70
71
#ifdef CONFIG_SOFTMMU
72
- unsigned mem_index = get_mmuidx(oi);
73
tcg_insn_unit *label_ptr;
74
75
- tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
76
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0);
77
tcg_out_qemu_st_direct(s, memop, data_reg,
78
- TCG_REG_X1, otype, addr_reg);
79
- add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
80
- data_reg, addr_reg, s->code_ptr, label_ptr);
81
+ TCG_REG_X1, addr_type, addr_reg);
82
+ add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
83
+ s->code_ptr, label_ptr);
84
#else /* !CONFIG_SOFTMMU */
85
unsigned a_bits = get_alignment_bits(memop);
86
if (a_bits) {
87
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
88
}
89
if (USE_GUEST_BASE) {
90
tcg_out_qemu_st_direct(s, memop, data_reg,
91
- TCG_REG_GUEST_BASE, otype, addr_reg);
92
+ TCG_REG_GUEST_BASE, addr_type, addr_reg);
93
} else {
94
tcg_out_qemu_st_direct(s, memop, data_reg,
95
addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
96
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
97
break;
98
case INDEX_op_qemu_st_i32:
99
case INDEX_op_qemu_st_i64:
100
- tcg_out_qemu_st(s, REG0(0), a1, a2);
101
+ tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
102
break;
103
104
case INDEX_op_bswap64_i64:
105
--
106
2.34.1
107
108
diff view generated by jsdifflib
1
Let tb->page_addr[0] contain the address of the first byte of the
1
Collect the 3 potential parts of the host address into a struct.
2
translated block, rather than the address of the page containing the
2
Reorg tcg_out_qemu_{ld,st}_direct to use it.
3
start of the translated block. We need to recover this value anyway
4
at various points, and it is easier to discard a page offset when it
5
is not needed, which happens naturally via the existing find_page shift.
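
To make the bookkeeping concrete (a sketch, not code from the patch):
with the full address stored, both the page and the in-page offset are
one mask away, so nothing is lost by not pre-masking.

    tb_page_addr_t first_byte = tb->page_addr[0];
    tb_page_addr_t page = first_byte & TARGET_PAGE_MASK;
    tb_page_addr_t subpage_ofs = first_byte & ~TARGET_PAGE_MASK;

    /* e.g. the extent of the tb on its first page: */
    tb_page_addr_t tb_start = first_byte;
    tb_page_addr_t tb_end = tb_start + tb->size;  /* may cross the page */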
6
3
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
accel/tcg/cpu-exec.c | 16 ++++++++--------
7
tcg/aarch64/tcg-target.c.inc | 86 +++++++++++++++++++++++++-----------
11
accel/tcg/cputlb.c | 3 ++-
8
1 file changed, 59 insertions(+), 27 deletions(-)
12
accel/tcg/translate-all.c | 9 +++++----
13
3 files changed, 15 insertions(+), 13 deletions(-)
14
9
15
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
10
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/cpu-exec.c
12
--- a/tcg/aarch64/tcg-target.c.inc
18
+++ b/accel/tcg/cpu-exec.c
13
+++ b/tcg/aarch64/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ struct tb_desc {
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
20
target_ulong pc;
15
tcg_out_insn(s, 3406, ADR, rd, offset);
21
target_ulong cs_base;
16
}
22
CPUArchState *env;
17
23
- tb_page_addr_t phys_page1;
18
+typedef struct {
24
+ tb_page_addr_t page_addr0;
19
+ TCGReg base;
25
uint32_t flags;
20
+ TCGReg index;
26
uint32_t cflags;
21
+ TCGType index_ext;
27
uint32_t trace_vcpu_dstate;
22
+} HostAddress;
28
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
23
+
29
const struct tb_desc *desc = d;
24
#ifdef CONFIG_SOFTMMU
30
25
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
31
if (tb->pc == desc->pc &&
26
* MemOpIdx oi, uintptr_t ra)
32
- tb->page_addr[0] == desc->phys_page1 &&
27
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
33
+ tb->page_addr[0] == desc->page_addr0 &&
28
#endif /* CONFIG_SOFTMMU */
34
tb->cs_base == desc->cs_base &&
29
35
tb->flags == desc->flags &&
30
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
36
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
31
- TCGReg data_r, TCGReg addr_r,
37
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
32
- TCGType otype, TCGReg off_r)
38
if (tb->page_addr[1] == -1) {
33
+ TCGReg data_r, HostAddress h)
39
return true;
34
{
40
} else {
35
switch (memop & MO_SSIZE) {
41
- tb_page_addr_t phys_page2;
36
case MO_UB:
42
- target_ulong virt_page2;
37
- tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
43
+ tb_page_addr_t phys_page1;
38
+ tcg_out_ldst_r(s, I3312_LDRB, data_r, h.base, h.index_ext, h.index);
44
+ target_ulong virt_page1;
39
break;
45
40
case MO_SB:
46
/*
41
tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
47
* We know that the first page matched, and an otherwise valid TB
42
- data_r, addr_r, otype, off_r);
48
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
43
+ data_r, h.base, h.index_ext, h.index);
49
* is different for the new TB. Therefore any exception raised
44
break;
50
* here by the faulting lookup is not premature.
45
case MO_UW:
51
*/
46
- tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
52
- virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
47
+ tcg_out_ldst_r(s, I3312_LDRH, data_r, h.base, h.index_ext, h.index);
53
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
48
break;
54
- if (tb->page_addr[1] == phys_page2) {
49
case MO_SW:
55
+ virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
50
tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
56
+ phys_page1 = get_page_addr_code(desc->env, virt_page1);
51
- data_r, addr_r, otype, off_r);
57
+ if (tb->page_addr[1] == phys_page1) {
52
+ data_r, h.base, h.index_ext, h.index);
58
return true;
53
break;
59
}
54
case MO_UL:
60
}
55
- tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
61
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
56
+ tcg_out_ldst_r(s, I3312_LDRW, data_r, h.base, h.index_ext, h.index);
62
if (phys_pc == -1) {
57
break;
63
return NULL;
58
case MO_SL:
59
- tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
60
+ tcg_out_ldst_r(s, I3312_LDRSWX, data_r, h.base, h.index_ext, h.index);
61
break;
62
case MO_UQ:
63
- tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
64
+ tcg_out_ldst_r(s, I3312_LDRX, data_r, h.base, h.index_ext, h.index);
65
break;
66
default:
67
g_assert_not_reached();
68
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
69
}
70
71
static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
72
- TCGReg data_r, TCGReg addr_r,
73
- TCGType otype, TCGReg off_r)
74
+ TCGReg data_r, HostAddress h)
75
{
76
switch (memop & MO_SIZE) {
77
case MO_8:
78
- tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
79
+ tcg_out_ldst_r(s, I3312_STRB, data_r, h.base, h.index_ext, h.index);
80
break;
81
case MO_16:
82
- tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
83
+ tcg_out_ldst_r(s, I3312_STRH, data_r, h.base, h.index_ext, h.index);
84
break;
85
case MO_32:
86
- tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
87
+ tcg_out_ldst_r(s, I3312_STRW, data_r, h.base, h.index_ext, h.index);
88
break;
89
case MO_64:
90
- tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
91
+ tcg_out_ldst_r(s, I3312_STRX, data_r, h.base, h.index_ext, h.index);
92
break;
93
default:
94
g_assert_not_reached();
95
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
96
{
97
MemOp memop = get_memop(oi);
98
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
99
+ HostAddress h;
100
101
/* Byte swapping is left to middle-end expansion. */
102
tcg_debug_assert((memop & MO_BSWAP) == 0);
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
104
tcg_insn_unit *label_ptr;
105
106
tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 1);
107
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
108
- TCG_REG_X1, addr_type, addr_reg);
109
+
110
+ h = (HostAddress){
111
+ .base = TCG_REG_X1,
112
+ .index = addr_reg,
113
+ .index_ext = addr_type
114
+ };
115
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
116
+
117
add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
118
s->code_ptr, label_ptr);
119
#else /* !CONFIG_SOFTMMU */
120
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
121
tcg_out_test_alignment(s, true, addr_reg, a_bits);
64
}
122
}
65
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
123
if (USE_GUEST_BASE) {
66
+ desc.page_addr0 = phys_pc;
124
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
67
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
125
- TCG_REG_GUEST_BASE, addr_type, addr_reg);
68
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
126
+ h = (HostAddress){
127
+ .base = TCG_REG_GUEST_BASE,
128
+ .index = addr_reg,
129
+ .index_ext = addr_type
130
+ };
131
} else {
132
- tcg_out_qemu_ld_direct(s, memop, data_type, data_reg,
133
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
134
+ h = (HostAddress){
135
+ .base = addr_reg,
136
+ .index = TCG_REG_XZR,
137
+ .index_ext = TCG_TYPE_I64
138
+ };
139
}
140
+ tcg_out_qemu_ld_direct(s, memop, data_type, data_reg, h);
141
#endif /* CONFIG_SOFTMMU */
69
}
142
}
70
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
143
71
index XXXXXXX..XXXXXXX 100644
144
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
72
--- a/accel/tcg/cputlb.c
73
+++ b/accel/tcg/cputlb.c
74
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
75
can be detected */
76
void tlb_protect_code(ram_addr_t ram_addr)
77
{
145
{
78
- cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
146
MemOp memop = get_memop(oi);
79
+ cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
147
TCGType addr_type = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
80
+ TARGET_PAGE_SIZE,
148
+ HostAddress h;
81
DIRTY_MEMORY_CODE);
149
150
/* Byte swapping is left to middle-end expansion. */
151
tcg_debug_assert((memop & MO_BSWAP) == 0);
152
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
153
tcg_insn_unit *label_ptr;
154
155
tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, get_mmuidx(oi), 0);
156
- tcg_out_qemu_st_direct(s, memop, data_reg,
157
- TCG_REG_X1, addr_type, addr_reg);
158
+
159
+ h = (HostAddress){
160
+ .base = TCG_REG_X1,
161
+ .index = addr_reg,
162
+ .index_ext = addr_type
163
+ };
164
+ tcg_out_qemu_st_direct(s, memop, data_reg, h);
165
+
166
add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
167
s->code_ptr, label_ptr);
168
#else /* !CONFIG_SOFTMMU */
169
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
170
tcg_out_test_alignment(s, false, addr_reg, a_bits);
171
}
172
if (USE_GUEST_BASE) {
173
- tcg_out_qemu_st_direct(s, memop, data_reg,
174
- TCG_REG_GUEST_BASE, addr_type, addr_reg);
175
+ h = (HostAddress){
176
+ .base = TCG_REG_GUEST_BASE,
177
+ .index = addr_reg,
178
+ .index_ext = addr_type
179
+ };
180
} else {
181
- tcg_out_qemu_st_direct(s, memop, data_reg,
182
- addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
183
+ h = (HostAddress){
184
+ .base = addr_reg,
185
+ .index = TCG_REG_XZR,
186
+ .index_ext = TCG_TYPE_I64
187
+ };
188
}
189
+ tcg_out_qemu_st_direct(s, memop, data_reg, h);
190
#endif /* CONFIG_SOFTMMU */
82
}
191
}
83
192
84
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/accel/tcg/translate-all.c
87
+++ b/accel/tcg/translate-all.c
88
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
89
qemu_spin_unlock(&tb->jmp_lock);
90
91
/* remove the TB from the hash list */
92
- phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
93
+ phys_pc = tb->page_addr[0];
94
h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
95
tb->trace_vcpu_dstate);
96
if (!qht_remove(&tb_ctx.htable, tb, h)) {
97
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
98
* we can only insert TBs that are fully initialized.
99
*/
100
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
101
- tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
102
+ tb_page_add(p, tb, 0, phys_pc);
103
if (p2) {
104
tb_page_add(p2, tb, 1, phys_page2);
105
} else {
106
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
107
if (n == 0) {
108
/* NOTE: tb_end may be after the end of the page, but
109
it is not a problem */
110
- tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
111
+ tb_start = tb->page_addr[0];
112
tb_end = tb_start + tb->size;
113
} else {
114
tb_start = tb->page_addr[1];
115
- tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
116
+ tb_end = tb_start + ((tb->page_addr[0] + tb->size)
117
+ & ~TARGET_PAGE_MASK);
118
}
119
if (!(tb_end <= start || tb_start >= end)) {
120
#ifdef TARGET_HAS_PRECISE_SMC
121
--
193
--
122
2.34.1
194
2.34.1
123
195
124
196
diff view generated by jsdifflib
1
Allow the target to cache items from the guest page tables.
1
Interpret the variable argument placement in the caller.
2
Pass data_type instead of is_64. We need to set this in
3
TCGLabelQemuLdst, so plumb this all the way through from tcg_out_op.
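
For reference, the argument layout being interpreted (a sketch of the
qemu_ld_i64 case on a 32-bit arm host, as it appears in tcg_out_op in
the diff below):

    /* args[0..1] = datalo, datahi;
       args[2]    = addrlo, plus args[3] = addrhi for a 64-bit guest;
       the final argument is the MemOpIdx. */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
                        args[3], TCG_TYPE_I64);
    } else {
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
                        args[4], TCG_TYPE_I64);
    }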
2
4
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
include/exec/cpu-defs.h | 9 +++++++++
8
tcg/arm/tcg-target.c.inc | 113 +++++++++++++++++++--------------------
9
1 file changed, 9 insertions(+)
9
1 file changed, 56 insertions(+), 57 deletions(-)
10
10
11
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/cpu-defs.h
13
--- a/tcg/arm/tcg-target.c.inc
14
+++ b/include/exec/cpu-defs.h
14
+++ b/tcg/arm/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
15
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
16
16
/* Record the context of a call to the out of line helper code for the slow
17
/* @lg_page_size contains the log2 of the page size. */
17
path for a load or store, so that we can later generate the correct
18
uint8_t lg_page_size;
18
helper code. */
19
+
19
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
20
- TCGReg datalo, TCGReg datahi, TCGReg addrlo,
21
- TCGReg addrhi, tcg_insn_unit *raddr,
22
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
23
+ MemOpIdx oi, TCGType type,
24
+ TCGReg datalo, TCGReg datahi,
25
+ TCGReg addrlo, TCGReg addrhi,
26
+ tcg_insn_unit *raddr,
27
tcg_insn_unit *label_ptr)
28
{
29
TCGLabelQemuLdst *label = new_ldst_label(s);
30
31
label->is_ld = is_ld;
32
label->oi = oi;
33
+ label->type = type;
34
label->datalo_reg = datalo;
35
label->datahi_reg = datahi;
36
label->addrlo_reg = addrlo;
37
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
38
}
39
#endif
40
41
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
42
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
43
+ TCGReg addrlo, TCGReg addrhi,
44
+ MemOpIdx oi, TCGType data_type)
45
{
46
- TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
47
- MemOpIdx oi;
48
- MemOp opc;
49
-#ifdef CONFIG_SOFTMMU
50
- int mem_index;
51
- TCGReg addend;
52
- tcg_insn_unit *label_ptr;
53
-#else
54
- unsigned a_bits;
55
-#endif
56
-
57
- datalo = *args++;
58
- datahi = (is64 ? *args++ : 0);
59
- addrlo = *args++;
60
- addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
61
- oi = *args++;
62
- opc = get_memop(oi);
63
+ MemOp opc = get_memop(oi);
64
65
#ifdef CONFIG_SOFTMMU
66
- mem_index = get_mmuidx(oi);
67
- addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);
68
+ TCGReg addend= tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);
69
70
- /* This a conditional BL only to load a pointer within this opcode into LR
71
- for the slow path. We will not be using the value for a tail call. */
72
- label_ptr = s->code_ptr;
20
+ /*
73
+ /*
21
+ * Allow target-specific additions to this structure.
74
+ * This is a conditional BL only to load a pointer within this opcode into
22
+ * This may be used to cache items from the guest cpu
75
+ * LR for the slow path. We will not be using the value for a tail call.
23
+ * page tables for later use by the implementation.
24
+ */
76
+ */
25
+#ifdef TARGET_PAGE_ENTRY_EXTRA
77
+ tcg_insn_unit *label_ptr = s->code_ptr;
26
+ TARGET_PAGE_ENTRY_EXTRA
78
tcg_out_bl_imm(s, COND_NE, 0);
27
+#endif
79
28
} CPUTLBEntryFull;
80
tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
29
81
30
/*
82
- add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
83
- s->code_ptr, label_ptr);
84
+ add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
85
+ addrlo, addrhi, s->code_ptr, label_ptr);
86
#else /* !CONFIG_SOFTMMU */
87
- a_bits = get_alignment_bits(opc);
88
+ unsigned a_bits = get_alignment_bits(opc);
89
if (a_bits) {
90
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
91
}
92
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
93
}
94
#endif
95
96
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
97
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
98
+ TCGReg addrlo, TCGReg addrhi,
99
+ MemOpIdx oi, TCGType data_type)
100
{
101
- TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
102
- MemOpIdx oi;
103
- MemOp opc;
104
-#ifdef CONFIG_SOFTMMU
105
- int mem_index;
106
- TCGReg addend;
107
- tcg_insn_unit *label_ptr;
108
-#else
109
- unsigned a_bits;
110
-#endif
111
-
112
- datalo = *args++;
113
- datahi = (is64 ? *args++ : 0);
114
- addrlo = *args++;
115
- addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
116
- oi = *args++;
117
- opc = get_memop(oi);
118
+ MemOp opc = get_memop(oi);
119
120
#ifdef CONFIG_SOFTMMU
121
- mem_index = get_mmuidx(oi);
122
- addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
123
+ TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);
124
125
tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
126
addrlo, addend, true);
127
128
/* The conditional call must come last, as we're going to return here. */
129
- label_ptr = s->code_ptr;
130
+ tcg_insn_unit *label_ptr = s->code_ptr;
131
tcg_out_bl_imm(s, COND_NE, 0);
132
133
- add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
134
- s->code_ptr, label_ptr);
135
+ add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
136
+ addrlo, addrhi, s->code_ptr, label_ptr);
137
#else /* !CONFIG_SOFTMMU */
138
- a_bits = get_alignment_bits(opc);
139
+ unsigned a_bits = get_alignment_bits(opc);
140
if (a_bits) {
141
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
142
}
143
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
144
break;
145
146
case INDEX_op_qemu_ld_i32:
147
- tcg_out_qemu_ld(s, args, 0);
148
+ if (TARGET_LONG_BITS == 32) {
149
+ tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
150
+ args[2], TCG_TYPE_I32);
151
+ } else {
152
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
153
+ args[3], TCG_TYPE_I32);
154
+ }
155
break;
156
case INDEX_op_qemu_ld_i64:
157
- tcg_out_qemu_ld(s, args, 1);
158
+ if (TARGET_LONG_BITS == 32) {
159
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
160
+ args[3], TCG_TYPE_I64);
161
+ } else {
162
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
163
+ args[4], TCG_TYPE_I64);
164
+ }
165
break;
166
case INDEX_op_qemu_st_i32:
167
- tcg_out_qemu_st(s, args, 0);
168
+ if (TARGET_LONG_BITS == 32) {
169
+ tcg_out_qemu_st(s, args[0], -1, args[1], -1,
170
+ args[2], TCG_TYPE_I32);
171
+ } else {
172
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
173
+ args[3], TCG_TYPE_I32);
174
+ }
175
break;
176
case INDEX_op_qemu_st_i64:
177
- tcg_out_qemu_st(s, args, 1);
178
+ if (TARGET_LONG_BITS == 32) {
179
+ tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
180
+ args[3], TCG_TYPE_I64);
181
+ } else {
182
+ tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
183
+ args[4], TCG_TYPE_I64);
184
+ }
185
break;
186
187
case INDEX_op_bswap16_i32:
31
--
188
--
32
2.34.1
189
2.34.1
33
190
34
191
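As a side note to the tcg_out_op dispatch above: the number of TCGArg slots a
qemu_ld/st opcode consumes depends on both the value width and
TARGET_LONG_BITS. The following stand-alone sketch (plain C with simplified
names, not QEMU code) models that decomposition; the -1 sentinel marks an
absent high part, matching the calls in the patch.

/*
 * Stand-alone sketch, not QEMU code: models why INDEX_op_qemu_ld_i64
 * consumes a variable number of argument slots on a 32-bit host.
 * A 64-bit value needs a lo/hi register pair, and so does a 64-bit
 * guest address; -1 marks an absent high part.
 */
#include <stdio.h>

static void ld_i64(int datalo, int datahi, int addrlo, int addrhi)
{
    printf("data %d/%d addr %d/%d\n", datalo, datahi, addrlo, addrhi);
}

int main(void)
{
    int args[] = { 0, 1, 2, 3 };        /* hypothetical TCGArg slots */
    int target_long_bits = 32;          /* guest address width */

    if (target_long_bits == 32) {
        ld_i64(args[0], args[1], args[2], -1);      /* 4th slot is oi */
    } else {
        ld_i64(args[0], args[1], args[2], args[3]); /* 5th slot is oi */
    }
    return 0;
}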
Collect the parts of the host address, and condition, into a struct.
Merge tcg_out_qemu_*_{index,direct} and use it.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 248 ++++++++++++++++++---------------------
 1 file changed, 115 insertions(+), 133 deletions(-)
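The shape of the change, as a minimal stand-alone sketch (simplified types
and hypothetical register numbers, not the backend code itself): one struct
carries the condition, base, and optional index of a host memory access, and
a negative index selects the immediate-offset addressing form.

/* Minimal sketch with simplified types; the real backend uses
 * ARMCond/TCGReg and emits instructions instead of printing. */
#include <stdio.h>
#include <stdbool.h>

typedef struct {
    int  cond;           /* condition code applied to every access */
    int  base;
    int  index;          /* < 0: no index register, use [base, #0] */
    bool index_scratch;  /* index may be clobbered (softmmu addend) */
} HostAddr;

static void emit_ld8(int datalo, HostAddr h)
{
    if (h.index < 0) {
        printf("ldrb r%d, [r%d, #0]\n", datalo, h.base);          /* offset form */
    } else {
        printf("ldrb r%d, [r%d, r%d]\n", datalo, h.base, h.index); /* register form */
    }
}

int main(void)
{
    HostAddr h = { .cond = 0, .base = 1, .index = -1 };
    emit_ld8(0, h);      /* user-only without guest_base: offset form */
    h.index = 2;
    emit_ld8(0, h);      /* softmmu: base plus TLB addend */
    return 0;
}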
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
     tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
 }

+typedef struct {
+    ARMCond cond;
+    TCGReg base;
+    int index;
+    bool index_scratch;
+} HostAddress;
+
 #ifdef CONFIG_SOFTMMU
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 }
 #endif /* SOFTMMU */

-static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
-                                  TCGReg datalo, TCGReg datahi,
-                                  TCGReg addrlo, TCGReg addend,
-                                  bool scratch_addend)
+static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
+                                   TCGReg datahi, HostAddress h)
 {
+    TCGReg base;
+
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);

     switch (opc & MO_SSIZE) {
     case MO_UB:
-        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld8_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld8_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_SB:
-        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld8s_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld8s_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_UW:
-        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld16u_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld16u_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_SW:
-        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld16s_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld16s_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_UL:
-        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
+        if (h.index < 0) {
+            tcg_out_ld32_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_ld32_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_UQ:
         /* We used pair allocation for datalo, so already should be aligned. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
         tcg_debug_assert(datahi == datalo + 1);
         /* LDRD requires alignment; double-check that. */
         if (get_alignment_bits(opc) >= MO_64) {
+            if (h.index < 0) {
+                tcg_out_ldrd_8(s, h.cond, datalo, h.base, 0);
+                break;
+            }
             /*
              * Rm (the second address op) must not overlap Rt or Rt + 1.
              * Since datalo is aligned, we can simplify the test via alignment.
              * Flip the two address arguments if that works.
              */
-            if ((addend & ~1) != datalo) {
-                tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
+            if ((h.index & ~1) != datalo) {
+                tcg_out_ldrd_r(s, h.cond, datalo, h.base, h.index);
                 break;
             }
-            if ((addrlo & ~1) != datalo) {
-                tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
+            if ((h.base & ~1) != datalo) {
+                tcg_out_ldrd_r(s, h.cond, datalo, h.index, h.base);
                 break;
             }
         }
-        if (scratch_addend) {
-            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
-            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
+        if (h.index < 0) {
+            base = h.base;
+            if (datalo == h.base) {
+                tcg_out_mov_reg(s, h.cond, TCG_REG_TMP, base);
+                base = TCG_REG_TMP;
+            }
+        } else if (h.index_scratch) {
+            tcg_out_ld32_rwb(s, h.cond, datalo, h.index, h.base);
+            tcg_out_ld32_12(s, h.cond, datahi, h.index, 4);
+            break;
         } else {
-            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
-                            addend, addrlo, SHIFT_IMM_LSL(0));
-            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
-            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
+            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
+                            h.base, h.index, SHIFT_IMM_LSL(0));
+            base = TCG_REG_TMP;
         }
+        tcg_out_ld32_12(s, h.cond, datalo, base, 0);
+        tcg_out_ld32_12(s, h.cond, datahi, base, 4);
         break;
     default:
         g_assert_not_reached();
     }
 }

-#ifndef CONFIG_SOFTMMU
-static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
-                                   TCGReg datahi, TCGReg addrlo)
-{
-    /* Byte swapping is left to middle-end expansion. */
-    tcg_debug_assert((opc & MO_BSWAP) == 0);
-
-    switch (opc & MO_SSIZE) {
-    case MO_UB:
-        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_SB:
-        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_UW:
-        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_SW:
-        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_UL:
-        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-        break;
-    case MO_UQ:
-        /* We used pair allocation for datalo, so already should be aligned. */
-        tcg_debug_assert((datalo & 1) == 0);
-        tcg_debug_assert(datahi == datalo + 1);
-        /* LDRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
-            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
-        } else if (datalo == addrlo) {
-            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
-            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-        } else {
-            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-}
-#endif
-
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
                             TCGReg addrlo, TCGReg addrhi,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;

 #ifdef CONFIG_SOFTMMU
-    TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);
+    h.cond = COND_AL;
+    h.base = addrlo;
+    h.index_scratch = true;
+    h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 1);

     /*
      * This is a conditional BL only to load a pointer within this opcode
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
     tcg_insn_unit *label_ptr = s->code_ptr;
     tcg_out_bl_imm(s, COND_NE, 0);

-    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
+    tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);

     add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
                         addrlo, addrhi, s->code_ptr, label_ptr);
-#else /* !CONFIG_SOFTMMU */
+#else
     unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
     }
-    if (guest_base) {
-        tcg_out_qemu_ld_index(s, opc, datalo, datahi,
-                              addrlo, TCG_REG_GUEST_BASE, false);
-    } else {
-        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
-    }
+
+    h.cond = COND_AL;
+    h.base = addrlo;
+    h.index = guest_base ? TCG_REG_GUEST_BASE : -1;
+    h.index_scratch = false;
+    tcg_out_qemu_ld_direct(s, opc, datalo, datahi, h);
 #endif
 }

-static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
-                                  TCGReg datalo, TCGReg datahi,
-                                  TCGReg addrlo, TCGReg addend,
-                                  bool scratch_addend)
-{
-    /* Byte swapping is left to middle-end expansion. */
-    tcg_debug_assert((opc & MO_BSWAP) == 0);
-
-    switch (opc & MO_SIZE) {
-    case MO_8:
-        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
-        break;
-    case MO_16:
-        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
-        break;
-    case MO_32:
-        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
-        break;
-    case MO_64:
-        /* We used pair allocation for datalo, so already should be aligned. */
-        tcg_debug_assert((datalo & 1) == 0);
-        tcg_debug_assert(datahi == datalo + 1);
-        /* STRD requires alignment; double-check that. */
-        if (get_alignment_bits(opc) >= MO_64) {
-            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
-        } else if (scratch_addend) {
-            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
-            tcg_out_st32_12(s, cond, datahi, addend, 4);
-        } else {
-            tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
-                            addend, addrlo, SHIFT_IMM_LSL(0));
-            tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
-            tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
-        }
-        break;
-    default:
-        g_assert_not_reached();
-    }
-}
-
-#ifndef CONFIG_SOFTMMU
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
-                                   TCGReg datahi, TCGReg addrlo)
+                                   TCGReg datahi, HostAddress h)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);

     switch (opc & MO_SIZE) {
     case MO_8:
-        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
+        if (h.index < 0) {
+            tcg_out_st8_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_st8_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_16:
-        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
+        if (h.index < 0) {
+            tcg_out_st16_8(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_st16_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_32:
-        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
+        if (h.index < 0) {
+            tcg_out_st32_12(s, h.cond, datalo, h.base, 0);
+        } else {
+            tcg_out_st32_r(s, h.cond, datalo, h.base, h.index);
+        }
         break;
     case MO_64:
         /* We used pair allocation for datalo, so already should be aligned. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
         tcg_debug_assert(datahi == datalo + 1);
         /* STRD requires alignment; double-check that. */
         if (get_alignment_bits(opc) >= MO_64) {
-            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
+            if (h.index < 0) {
+                tcg_out_strd_8(s, h.cond, datalo, h.base, 0);
+            } else {
+                tcg_out_strd_r(s, h.cond, datalo, h.base, h.index);
+            }
+        } else if (h.index_scratch) {
+            tcg_out_st32_rwb(s, h.cond, datalo, h.index, h.base);
+            tcg_out_st32_12(s, h.cond, datahi, h.index, 4);
         } else {
-            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
-            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
+            tcg_out_dat_reg(s, h.cond, ARITH_ADD, TCG_REG_TMP,
+                            h.base, h.index, SHIFT_IMM_LSL(0));
+            tcg_out_st32_12(s, h.cond, datalo, TCG_REG_TMP, 0);
+            tcg_out_st32_12(s, h.cond, datahi, TCG_REG_TMP, 4);
         }
         break;
     default:
         g_assert_not_reached();
     }
 }
-#endif

 static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
                             TCGReg addrlo, TCGReg addrhi,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;

 #ifdef CONFIG_SOFTMMU
-    TCGReg addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);
-
-    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
-                          addrlo, addend, true);
+    h.cond = COND_EQ;
+    h.base = addrlo;
+    h.index_scratch = true;
+    h.index = tcg_out_tlb_read(s, addrlo, addrhi, opc, get_mmuidx(oi), 0);
+    tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);

     /* The conditional call must come last, as we're going to return here. */
     tcg_insn_unit *label_ptr = s->code_ptr;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,

     add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
                         addrlo, addrhi, s->code_ptr, label_ptr);
-#else /* !CONFIG_SOFTMMU */
+#else
     unsigned a_bits = get_alignment_bits(opc);
+
+    h.cond = COND_AL;
     if (a_bits) {
         tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
+        h.cond = COND_EQ;
     }
-    if (guest_base) {
-        tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
-                              addrlo, TCG_REG_GUEST_BASE, false);
-    } else {
-        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
-    }
+
+    h.base = addrlo;
+    h.index = guest_base ? TCG_REG_GUEST_BASE : -1;
+    h.index_scratch = false;
+    tcg_out_qemu_st_direct(s, opc, datalo, datahi, h);
 #endif
 }

--
2.34.1
Interpret the variable argument placement in the caller. Shift some
code around slightly to share more between softmmu and user-only.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 100 +++++++++++++------------------
 1 file changed, 42 insertions(+), 58 deletions(-)
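A rough sketch of the resulting control flow (hand-written for illustration;
the register numbers are hypothetical placeholders, not LoongArch
definitions): only the choice of index register differs between the softmmu
and user-only configurations, while the address zero-extension and the
indexed access are shared.

/*
 * Sketch of the shared tail after the cleanup; not the patch itself.
 */
#include <stdio.h>
#include <stdbool.h>

enum { REG_ZERO = 0, GUEST_BASE_REG = 25, TMP2 = 17 };   /* hypothetical */

static void qemu_ld_shape(int addr_reg, bool softmmu, bool use_guest_base)
{
    int base, index;

    if (softmmu) {
        index = TMP2;                    /* TLB addend loaded by tlb_load */
    } else {
        index = use_guest_base ? GUEST_BASE_REG : REG_ZERO;
    }
    base = addr_reg;                     /* after optional 32-bit zext */
    printf("ldx.d data, $r%d, $r%d\n", base, index);
}

int main(void)
{
    qemu_ld_shape(4, true, false);       /* softmmu configuration */
    qemu_ld_shape(4, false, true);       /* user-only with guest_base */
    return 0;
}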
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
     }
 }

-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl;
-    TCGReg data_regl;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
+    MemOp opc = get_memop(oi);
+    TCGReg base, index;
+
+#ifdef CONFIG_SOFTMMU
     tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
-    TCGReg base;

-    data_regl = *args++;
-    addr_regl = *args++;
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
-    add_qemu_ldst_label(s, 1, oi, type,
-                        data_regl, addr_regl,
-                        s->code_ptr, label_ptr);
+    tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
+    index = TCG_REG_TMP2;
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
-        tcg_out_test_alignment(s, true, addr_regl, a_bits);
+        tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-    tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
+    index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+#endif
+
+    base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+    tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type);
+
+#ifdef CONFIG_SOFTMMU
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #endif
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
     }
 }

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl;
-    TCGReg data_regl;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
+    MemOp opc = get_memop(oi);
+    TCGReg base, index;
+
+#ifdef CONFIG_SOFTMMU
     tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
-    TCGReg base;

-    data_regl = *args++;
-    addr_regl = *args++;
-    oi = *args++;
-    opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
-    add_qemu_ldst_label(s, 0, oi, type,
-                        data_regl, addr_regl,
-                        s->code_ptr, label_ptr);
+    tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
+    index = TCG_REG_TMP2;
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
-        tcg_out_test_alignment(s, false, addr_regl, a_bits);
+        tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
-    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
-    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
-    tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
+    index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+#endif
+
+    base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+    tcg_out_qemu_st_indexed(s, data_reg, base, index, opc);
+
+#ifdef CONFIG_SOFTMMU
+    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #endif
 }

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;

     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, TCG_TYPE_I32);
+        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, TCG_TYPE_I64);
+        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
         break;

     case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
--
2.34.1
Collect the 2 parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 55 +++++++++++++++++---------------
 1 file changed, 30 insertions(+), 25 deletions(-)
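For illustration only (an assumed simplification, not the backend source): on
LoongArch the indexed load/store instructions always take two registers, so
the HostAddress pair is sufficient, and "no index" is represented by the
hard-wired zero register rather than a sentinel value.

#include <stdio.h>

typedef int Reg;                          /* placeholder for TCGReg */

typedef struct {
    Reg base;
    Reg index;    /* the zero register when there is no second operand */
} HostAddress;

enum { REG_ZERO = 0 };                    /* hypothetical numbering */

int main(void)
{
    HostAddress h = { .base = 4, .index = REG_ZERO };
    /* ldx.d rd, base, index  computes  rd = *(int64_t *)(base + index) */
    printf("ldx.d $r5, $r%d, $r%d\n", h.base, h.index);
    return 0;
}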
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
     return addr;
 }

-static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
-                                    TCGReg rk, MemOp opc, TCGType type)
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+} HostAddress;
+
+static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
+                                    TCGReg rd, HostAddress h)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);

     switch (opc & MO_SSIZE) {
     case MO_UB:
-        tcg_out_opc_ldx_bu(s, rd, rj, rk);
+        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
         break;
     case MO_SB:
-        tcg_out_opc_ldx_b(s, rd, rj, rk);
+        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
         break;
     case MO_UW:
-        tcg_out_opc_ldx_hu(s, rd, rj, rk);
+        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
         break;
     case MO_SW:
-        tcg_out_opc_ldx_h(s, rd, rj, rk);
+        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
         break;
     case MO_UL:
         if (type == TCG_TYPE_I64) {
-            tcg_out_opc_ldx_wu(s, rd, rj, rk);
+            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
             break;
         }
         /* fallthrough */
     case MO_SL:
-        tcg_out_opc_ldx_w(s, rd, rj, rk);
+        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
         break;
     case MO_UQ:
-        tcg_out_opc_ldx_d(s, rd, rj, rk);
+        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
-    TCGReg base, index;
+    HostAddress h;

 #ifdef CONFIG_SOFTMMU
     tcg_insn_unit *label_ptr[1];

     tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
-    index = TCG_REG_TMP2;
+    h.index = TCG_REG_TMP2;
 #else
     unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+    h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
 #endif

-    base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
-    tcg_out_qemu_ld_indexed(s, data_reg, base, index, opc, data_type);
+    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+    tcg_out_qemu_ld_indexed(s, opc, data_type, data_reg, h);

 #ifdef CONFIG_SOFTMMU
     add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 #endif
 }

-static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
-                                    TCGReg rj, TCGReg rk, MemOp opc)
+static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
+                                    TCGReg rd, HostAddress h)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);

     switch (opc & MO_SIZE) {
     case MO_8:
-        tcg_out_opc_stx_b(s, data, rj, rk);
+        tcg_out_opc_stx_b(s, rd, h.base, h.index);
         break;
     case MO_16:
-        tcg_out_opc_stx_h(s, data, rj, rk);
+        tcg_out_opc_stx_h(s, rd, h.base, h.index);
         break;
     case MO_32:
-        tcg_out_opc_stx_w(s, data, rj, rk);
+        tcg_out_opc_stx_w(s, rd, h.base, h.index);
         break;
     case MO_64:
-        tcg_out_opc_stx_d(s, data, rj, rk);
+        tcg_out_opc_stx_d(s, rd, h.base, h.index);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
-    TCGReg base, index;
+    HostAddress h;

 #ifdef CONFIG_SOFTMMU
     tcg_insn_unit *label_ptr[1];

     tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
-    index = TCG_REG_TMP2;
+    h.index = TCG_REG_TMP2;
 #else
     unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
-    index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
+    h.index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
 #endif

-    base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
-    tcg_out_qemu_st_indexed(s, data_reg, base, index, opc);
+    h.base = tcg_out_zext_addr_if_32_bit(s, addr_reg, TCG_REG_TMP0);
+    tcg_out_qemu_st_indexed(s, opc, data_reg, h);

 #ifdef CONFIG_SOFTMMU
     add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
--
2.34.1
Interpret the variable argument placement in the caller. There are
several places where we already convert back from bool to type.
Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.c.inc | 186 +++++++++++++++++++-------------------
 1 file changed, 95 insertions(+), 91 deletions(-)
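A simplified illustration of the cleanup (stand-alone C with made-up names,
not QEMU's definitions): carrying the TCGType through the helpers avoids
reconstructing it from a bool at every use site.

#include <stdio.h>
#include <stdbool.h>

typedef enum { TYPE_I32, TYPE_I64 } Type;

/* Before: callers passed bool is_64 and each helper rebuilt the type. */
static Type from_bool(bool is_64) { return is_64 ? TYPE_I64 : TYPE_I32; }

/* After: the type flows through unchanged. */
static void ld_direct(Type type)
{
    if (type == TYPE_I64) {
        printf("emit 64-bit load variant\n");
    } else {
        printf("emit 32-bit load variant\n");
    }
}

int main(void)
{
    ld_direct(from_bool(true));   /* old-style call site */
    ld_direct(TYPE_I64);          /* new-style call site */
    return 0;
}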
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 #endif /* SOFTMMU */

 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
-                                   TCGReg base, MemOp opc, bool is_64)
+                                   TCGReg base, MemOp opc, TCGType type)
 {
     switch (opc & (MO_SSIZE | MO_BSWAP)) {
     case MO_UB:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
         break;
     case MO_UL | MO_BSWAP:
-        if (TCG_TARGET_REG_BITS == 64 && is_64) {
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
             if (use_mips32r2_instructions) {
                 tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
                 tcg_out_bswap32(s, lo, lo, TCG_BSWAP_IZ | TCG_BSWAP_OZ);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
         }
         break;
     case MO_UL:
-        if (TCG_TARGET_REG_BITS == 64 && is_64) {
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) {
             tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
             break;
         }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 }

 static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
-                                    TCGReg base, MemOp opc, bool is_64)
+                                    TCGReg base, MemOp opc, TCGType type)
 {
     const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
     const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
     case MO_UL:
         tcg_out_opc_imm(s, lw1, lo, base, 0);
         tcg_out_opc_imm(s, lw2, lo, base, 3);
-        if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
+        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn) {
             tcg_out_ext32u(s, lo, lo);
         }
         break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
             tcg_out_opc_imm(s, lw1, lo, base, 0);
             tcg_out_opc_imm(s, lw2, lo, base, 3);
             tcg_out_bswap32(s, lo, lo,
-                            TCG_TARGET_REG_BITS == 64 && is_64
+                            TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64
                             ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
         } else {
             const tcg_insn_unit *subr =
-                (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
+                (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn
                  ? bswap32u_addr : bswap32_addr);

             tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
             tcg_out_bswap_subr(s, subr);
             /* delay slot */
             tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
-            tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
+            tcg_out_mov(s, type, lo, TCG_TMP3);
         }
         break;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
     }
 }

-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+                            TCGReg addrlo, TCGReg addrhi,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[2];
-#else
-#endif
-    unsigned a_bits, s_bits;
-    TCGReg base = TCG_REG_A0;
-
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    a_bits = get_alignment_bits(opc);
-    s_bits = opc & MO_SIZE;
+    MemOp opc = get_memop(oi);
+    unsigned a_bits = get_alignment_bits(opc);
+    unsigned s_bits = opc & MO_SIZE;
+    TCGReg base;

     /*
      * R6 removes the left/right instructions but requires the
      * system to support misaligned memory accesses.
      */
 #if defined(CONFIG_SOFTMMU)
-    tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
+    tcg_insn_unit *label_ptr[2];
+
+    base = TCG_REG_A0;
+    tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 1);
     if (use_mips32r6_instructions || a_bits >= s_bits) {
-        tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+        tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
     } else {
-        tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
+        tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
     }
-    add_qemu_ldst_label(s, 1, oi,
-                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_regl, data_regh, addr_regl, addr_regh,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, data_type, datalo, datahi,
+                        addrlo, addrhi, s->code_ptr, label_ptr);
 #else
+    base = addrlo;
     if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
-        tcg_out_ext32u(s, base, addr_regl);
-        addr_regl = base;
+        tcg_out_ext32u(s, TCG_REG_A0, base);
+        base = TCG_REG_A0;
     }
-    if (guest_base == 0 && data_regl != addr_regl) {
-        base = addr_regl;
-    } else if (guest_base == (int16_t)guest_base) {
-        tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
-    } else {
-        tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
+    if (guest_base) {
+        if (guest_base == (int16_t)guest_base) {
+            tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
+        } else {
+            tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
+                            TCG_GUEST_BASE_REG);
+        }
+        base = TCG_REG_A0;
     }
     if (use_mips32r6_instructions) {
         if (a_bits) {
-            tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+            tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
         }
-        tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+        tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
     } else {
         if (a_bits && a_bits != s_bits) {
-            tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+            tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
         }
         if (a_bits >= s_bits) {
-            tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+            tcg_out_qemu_ld_direct(s, datalo, datahi, base, opc, data_type);
         } else {
-            tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
+            tcg_out_qemu_ld_unalign(s, datalo, datahi, base, opc, data_type);
         }
     }
 #endif
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
         g_assert_not_reached();
     }
 }

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
-{
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[2];
-#endif
-    unsigned a_bits, s_bits;
-    TCGReg base = TCG_REG_A0;
-
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    oi = *args++;
-    opc = get_memop(oi);
-    a_bits = get_alignment_bits(opc);
205
- s_bits = opc & MO_SIZE;
206
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
207
+ TCGReg addrlo, TCGReg addrhi,
208
+ MemOpIdx oi, TCGType data_type)
209
+{
210
+ MemOp opc = get_memop(oi);
211
+ unsigned a_bits = get_alignment_bits(opc);
212
+ unsigned s_bits = opc & MO_SIZE;
213
+ TCGReg base;
214
215
/*
216
* R6 removes the left/right instructions but requires the
217
* system to support misaligned memory accesses.
218
*/
219
#if defined(CONFIG_SOFTMMU)
220
- tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
221
+ tcg_insn_unit *label_ptr[2];
222
+
223
+ base = TCG_REG_A0;
224
+ tcg_out_tlb_load(s, base, addrlo, addrhi, oi, label_ptr, 0);
225
if (use_mips32r6_instructions || a_bits >= s_bits) {
226
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
227
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
228
} else {
229
- tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
230
+ tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc);
231
}
232
- add_qemu_ldst_label(s, 0, oi,
233
- (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
234
- data_regl, data_regh, addr_regl, addr_regh,
235
- s->code_ptr, label_ptr);
236
+ add_qemu_ldst_label(s, false, oi, data_type, datalo, datahi,
237
+ addrlo, addrhi, s->code_ptr, label_ptr);
238
#else
239
+ base = addrlo;
240
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
241
- tcg_out_ext32u(s, base, addr_regl);
242
- addr_regl = base;
243
+ tcg_out_ext32u(s, TCG_REG_A0, base);
244
+ base = TCG_REG_A0;
245
}
246
- if (guest_base == 0) {
247
- base = addr_regl;
248
- } else if (guest_base == (int16_t)guest_base) {
249
- tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
250
- } else {
251
- tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
252
+ if (guest_base) {
253
+ if (guest_base == (int16_t)guest_base) {
254
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base);
255
+ } else {
256
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base,
257
+ TCG_GUEST_BASE_REG);
258
+ }
259
+ base = TCG_REG_A0;
260
}
261
if (use_mips32r6_instructions) {
262
if (a_bits) {
263
- tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
264
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
265
}
266
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
267
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
268
} else {
269
if (a_bits && a_bits != s_bits) {
270
- tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
271
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
272
}
273
if (a_bits >= s_bits) {
274
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
275
+ tcg_out_qemu_st_direct(s, datalo, datahi, base, opc);
276
} else {
277
- tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
278
+ tcg_out_qemu_st_unalign(s, datalo, datahi, base, opc);
279
}
280
}
281
#endif
282
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
283
break;
284
285
case INDEX_op_qemu_ld_i32:
286
- tcg_out_qemu_ld(s, args, false);
287
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
288
+ tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
289
+ } else {
290
+ tcg_out_qemu_ld(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
291
+ }
292
break;
293
case INDEX_op_qemu_ld_i64:
294
- tcg_out_qemu_ld(s, args, true);
295
+ if (TCG_TARGET_REG_BITS == 64) {
296
+ tcg_out_qemu_ld(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
297
+ } else if (TARGET_LONG_BITS == 32) {
298
+ tcg_out_qemu_ld(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
299
+ } else {
300
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
301
+ }
302
break;
303
case INDEX_op_qemu_st_i32:
304
- tcg_out_qemu_st(s, args, false);
305
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
306
+ tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I32);
307
+ } else {
308
+ tcg_out_qemu_st(s, a0, 0, a1, a2, args[3], TCG_TYPE_I32);
309
+ }
310
break;
311
case INDEX_op_qemu_st_i64:
312
- tcg_out_qemu_st(s, args, true);
313
+ if (TCG_TARGET_REG_BITS == 64) {
314
+ tcg_out_qemu_st(s, a0, 0, a1, 0, a2, TCG_TYPE_I64);
315
+ } else if (TARGET_LONG_BITS == 32) {
316
+ tcg_out_qemu_st(s, a0, a1, a2, 0, args[3], TCG_TYPE_I64);
317
+ } else {
318
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
319
+ }
320
break;
321
322
case INDEX_op_add2_i32:
104
--
2.34.1

From: Leandro Lupori <leandro.lupori@eldorado.org.br>

PowerPC64 processors handle direct branches better than indirect
ones, resulting in fewer stalled cycles and branch misses.

However, PPC's tb_target_set_jmp_target() was only using direct
branches for 16-bit jumps, while PowerPC64's unconditional branch
instructions are able to handle displacements of up to 26 bits.
To take advantage of this, jumps whose displacements fit in
between 17 and 26 bits are now also converted to direct branches.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
[rth: Expanded some commentary.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 119 +++++++++++++++++++++++++++++----------
 1 file changed, 88 insertions(+), 31 deletions(-)

Interpret the variable argument placement in the caller. Pass data_type
instead of is64 -- there are several places where we already convert back
from bool to type. Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 110 +++++++++++++++++++++------------------
 1 file changed, 59 insertions(+), 51 deletions(-)
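
A hedged sketch of the shape of that cleanup (stand-in types, not the
exact QEMU declarations): the old entry point re-derived the TCGType
from a bool on every call, while the new one has the caller interpret
the opcode's argument array once and pass everything through explicitly.

    #include <stdbool.h>

    /* Stand-in types for illustration only. */
    typedef enum { TCG_TYPE_I32, TCG_TYPE_I64 } TCGType;
    typedef int TCGReg;
    typedef unsigned long TCGArg;
    typedef unsigned int MemOpIdx;

    /* Before: the callee unpacks args and rebuilds the type from a flag. */
    static void qemu_ld_old(const TCGArg *args, bool is_64)
    {
        TCGType type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
        /* ... unpack data/addr registers from args, then use type ... */
    }

    /* After: registers, oi and the type arrive already interpreted. */
    static void qemu_ld_new(TCGReg datalo, TCGReg datahi,
                            TCGReg addrlo, TCGReg addrhi,
                            MemOpIdx oi, TCGType data_type)
    {
        /* ... emit the access directly from data_type ... */
    }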
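For the 26-bit jump patch above, the key arithmetic is a single range
test: PowerPC's I-form branch carries a signed 26-bit, word-aligned
displacement, so any target within roughly +/-32 MiB of the branch can
be reached with one direct branch. A minimal standalone sketch of that
test (illustrative only, not QEMU's actual in_range_b()):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sign-extend the low 26 bits of a value. */
    static inline int64_t sext26(int64_t x)
    {
        return (int64_t)((uint64_t)x << 38) >> 38;
    }

    /*
     * True if disp can be encoded in the I-form branch: it must survive
     * 26-bit sign extension and be word aligned (low two bits clear).
     */
    static bool fits_direct_branch(int64_t disp)
    {
        return disp == sext26(disp) && (disp & 3) == 0;
    }

When the test fails, the generated code keeps the indirect path of
loading the target into CTR and branching through it, which is the
mtspr/bcctr pair written as i2/i3 in the diff below.
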
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
12
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/ppc/tcg-target.c.inc
14
--- a/tcg/ppc/tcg-target.c.inc
23
+++ b/tcg/ppc/tcg-target.c.inc
15
+++ b/tcg/ppc/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
16
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
25
tcg_out32(s, insn);
17
/* Record the context of a call to the out of line helper code for the slow
18
path for a load or store, so that we can later generate the correct
19
helper code. */
20
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
21
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
22
+ TCGType type, MemOpIdx oi,
23
TCGReg datalo_reg, TCGReg datahi_reg,
24
TCGReg addrlo_reg, TCGReg addrhi_reg,
25
tcg_insn_unit *raddr, tcg_insn_unit *lptr)
26
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
27
TCGLabelQemuLdst *label = new_ldst_label(s);
28
29
label->is_ld = is_ld;
30
+ label->type = type;
31
label->oi = oi;
32
label->datalo_reg = datalo_reg;
33
label->datahi_reg = datahi_reg;
34
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
35
36
#endif /* SOFTMMU */
37
38
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
39
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
40
+ TCGReg addrlo, TCGReg addrhi,
41
+ MemOpIdx oi, TCGType data_type)
42
{
43
- TCGReg datalo, datahi, addrlo, rbase;
44
- TCGReg addrhi __attribute__((unused));
45
- MemOpIdx oi;
46
- MemOp opc, s_bits;
47
+ MemOp opc = get_memop(oi);
48
+ MemOp s_bits = opc & MO_SIZE;
49
+ TCGReg rbase;
50
+
51
#ifdef CONFIG_SOFTMMU
52
- int mem_index;
53
tcg_insn_unit *label_ptr;
54
-#else
55
- unsigned a_bits;
56
-#endif
57
58
- datalo = *args++;
59
- datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
60
- addrlo = *args++;
61
- addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
62
- oi = *args++;
63
- opc = get_memop(oi);
64
- s_bits = opc & MO_SIZE;
65
-
66
-#ifdef CONFIG_SOFTMMU
67
- mem_index = get_mmuidx(oi);
68
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
69
+ addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
70
71
/* Load a pointer into the current opcode w/conditional branch-link. */
72
label_ptr = s->code_ptr;
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
74
75
rbase = TCG_REG_R3;
76
#else /* !CONFIG_SOFTMMU */
77
- a_bits = get_alignment_bits(opc);
78
+ unsigned a_bits = get_alignment_bits(opc);
79
if (a_bits) {
80
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
81
}
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
83
}
84
85
#ifdef CONFIG_SOFTMMU
86
- add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
87
- s->code_ptr, label_ptr);
88
+ add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
89
+ addrlo, addrhi, s->code_ptr, label_ptr);
90
#endif
26
}
91
}
27
92
28
+static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
93
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
29
+{
94
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
30
+ if (HOST_BIG_ENDIAN) {
95
+ TCGReg addrlo, TCGReg addrhi,
31
+ return (uint64_t)i1 << 32 | i2;
96
+ MemOpIdx oi, TCGType data_type)
32
+ }
97
{
33
+ return (uint64_t)i2 << 32 | i1;
98
- TCGReg datalo, datahi, addrlo, rbase;
34
+}
99
- TCGReg addrhi __attribute__((unused));
100
- MemOpIdx oi;
101
- MemOp opc, s_bits;
102
+ MemOp opc = get_memop(oi);
103
+ MemOp s_bits = opc & MO_SIZE;
104
+ TCGReg rbase;
35
+
105
+
36
+static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
106
#ifdef CONFIG_SOFTMMU
37
+ tcg_insn_unit i0, tcg_insn_unit i1)
107
- int mem_index;
38
+{
108
tcg_insn_unit *label_ptr;
39
+#if TCG_TARGET_REG_BITS == 64
40
+ qatomic_set((uint64_t *)rw, make_pair(i0, i1));
41
+ flush_idcache_range(rx, rw, 8);
42
+#else
43
+ qemu_build_not_reached();
44
+#endif
45
+}
46
+
47
+static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
48
+ tcg_insn_unit i0, tcg_insn_unit i1,
49
+ tcg_insn_unit i2, tcg_insn_unit i3)
50
+{
51
+ uint64_t p[2];
52
+
53
+ p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
54
+ p[HOST_BIG_ENDIAN] = make_pair(i2, i3);
55
+
56
+ /*
57
+ * There's no convenient way to get the compiler to allocate a pair
58
+ * of registers at an even index, so copy into r6/r7 and clobber.
59
+ */
60
+ asm("mr %%r6, %1\n\t"
61
+ "mr %%r7, %2\n\t"
62
+ "stq %%r6, %0"
63
+ : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
64
+ flush_idcache_range(rx, rw, 16);
65
+}
66
+
67
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
68
uintptr_t jmp_rw, uintptr_t addr)
69
{
70
- if (TCG_TARGET_REG_BITS == 64) {
71
- tcg_insn_unit i1, i2;
72
- intptr_t tb_diff = addr - tc_ptr;
73
- intptr_t br_diff = addr - (jmp_rx + 4);
74
- uint64_t pair;
75
+ tcg_insn_unit i0, i1, i2, i3;
76
+ intptr_t tb_diff = addr - tc_ptr;
77
+ intptr_t br_diff = addr - (jmp_rx + 4);
78
+ intptr_t lo, hi;
79
80
- /* This does not exercise the range of the branch, but we do
81
- still need to be able to load the new value of TCG_REG_TB.
82
- But this does still happen quite often. */
83
- if (tb_diff == (int16_t)tb_diff) {
84
- i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
85
- i2 = B | (br_diff & 0x3fffffc);
86
- } else {
87
- intptr_t lo = (int16_t)tb_diff;
88
- intptr_t hi = (int32_t)(tb_diff - lo);
89
- assert(tb_diff == hi + lo);
90
- i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
91
- i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
92
- }
93
-#if HOST_BIG_ENDIAN
94
- pair = (uint64_t)i1 << 32 | i2;
95
-#else
109
-#else
96
- pair = (uint64_t)i2 << 32 | i1;
110
- unsigned a_bits;
97
-#endif
111
-#endif
112
113
- datalo = *args++;
114
- datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
115
- addrlo = *args++;
116
- addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
117
- oi = *args++;
118
- opc = get_memop(oi);
119
- s_bits = opc & MO_SIZE;
98
-
120
-
99
- /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
121
-#ifdef CONFIG_SOFTMMU
100
- within qatomic_set that would fail to build a ppc32 host. */
122
- mem_index = get_mmuidx(oi);
101
- qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
123
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
102
- flush_idcache_range(jmp_rx, jmp_rw, 8);
124
+ addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
103
- } else {
125
104
+ if (TCG_TARGET_REG_BITS == 32) {
126
/* Load a pointer into the current opcode w/conditional branch-link. */
105
intptr_t diff = addr - jmp_rx;
127
label_ptr = s->code_ptr;
106
tcg_debug_assert(in_range_b(diff));
128
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
107
qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
129
108
flush_idcache_range(jmp_rx, jmp_rw, 4);
130
rbase = TCG_REG_R3;
109
+ return;
131
#else /* !CONFIG_SOFTMMU */
110
}
132
- a_bits = get_alignment_bits(opc);
111
+
133
+ unsigned a_bits = get_alignment_bits(opc);
112
+ /*
134
if (a_bits) {
113
+ * For 16-bit displacements, we can use a single add + branch.
135
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
114
+ * This happens quite often.
136
}
115
+ */
137
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
116
+ if (tb_diff == (int16_t)tb_diff) {
138
}
117
+ i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
139
118
+ i1 = B | (br_diff & 0x3fffffc);
140
#ifdef CONFIG_SOFTMMU
119
+ ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
141
- add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
120
+ return;
142
- s->code_ptr, label_ptr);
121
+ }
143
+ add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
122
+
144
+ addrlo, addrhi, s->code_ptr, label_ptr);
123
+ lo = (int16_t)tb_diff;
145
#endif
124
+ hi = (int32_t)(tb_diff - lo);
125
+ assert(tb_diff == hi + lo);
126
+ i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
127
+ i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
128
+
129
+ /*
130
+ * Without stq from 2.07, we can only update two insns,
131
+ * and those must be the ones that load the target address.
132
+ */
133
+ if (!have_isa_2_07) {
134
+ ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
135
+ return;
136
+ }
137
+
138
+ /*
139
+ * For 26-bit displacements, we can use a direct branch.
140
+ * Otherwise we still need the indirect branch, which we
141
+ * must restore after a potential direct branch write.
142
+ */
143
+ br_diff -= 4;
144
+ if (in_range_b(br_diff)) {
145
+ i2 = B | (br_diff & 0x3fffffc);
146
+ i3 = NOP;
147
+ } else {
148
+ i2 = MTSPR | RS(TCG_REG_TB) | CTR;
149
+ i3 = BCCTR | BO_ALWAYS;
150
+ }
151
+ ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
152
}
146
}
153
147
154
static void tcg_out_call_int(TCGContext *s, int lk,
155
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
148
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
156
if (s->tb_jmp_insn_offset) {
149
break;
157
/* Direct jump. */
150
158
if (TCG_TARGET_REG_BITS == 64) {
151
case INDEX_op_qemu_ld_i32:
159
- /* Ensure the next insns are 8-byte aligned. */
152
- tcg_out_qemu_ld(s, args, false);
160
- if ((uintptr_t)s->code_ptr & 7) {
153
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
161
+ /* Ensure the next insns are 8 or 16-byte aligned. */
154
+ tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
162
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
155
+ args[2], TCG_TYPE_I32);
163
tcg_out32(s, NOP);
156
+ } else {
164
}
157
+ tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
165
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
158
+ args[3], TCG_TYPE_I32);
159
+ }
160
break;
161
case INDEX_op_qemu_ld_i64:
162
- tcg_out_qemu_ld(s, args, true);
163
+ if (TCG_TARGET_REG_BITS == 64) {
164
+ tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
165
+ args[2], TCG_TYPE_I64);
166
+ } else if (TARGET_LONG_BITS == 32) {
167
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
168
+ args[3], TCG_TYPE_I64);
169
+ } else {
170
+ tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
171
+ args[4], TCG_TYPE_I64);
172
+ }
173
break;
174
case INDEX_op_qemu_st_i32:
175
- tcg_out_qemu_st(s, args, false);
176
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
177
+ tcg_out_qemu_st(s, args[0], -1, args[1], -1,
178
+ args[2], TCG_TYPE_I32);
179
+ } else {
180
+ tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
181
+ args[3], TCG_TYPE_I32);
182
+ }
183
break;
184
case INDEX_op_qemu_st_i64:
185
- tcg_out_qemu_st(s, args, true);
186
+ if (TCG_TARGET_REG_BITS == 64) {
187
+ tcg_out_qemu_st(s, args[0], -1, args[1], -1,
188
+ args[2], TCG_TYPE_I64);
189
+ } else if (TARGET_LONG_BITS == 32) {
190
+ tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
191
+ args[3], TCG_TYPE_I64);
192
+ } else {
193
+ tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
194
+ args[4], TCG_TYPE_I64);
195
+ }
196
break;
197
198
case INDEX_op_setcond_i32:
166
--
2.34.1

This bitmap is created and discarded immediately.
We gain nothing by its existence.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220822232338.1727934-2-richard.henderson@linaro.org>
---
 accel/tcg/translate-all.c | 78 ++-------------------------------------
 1 file changed, 4 insertions(+), 74 deletions(-)

Collect the parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st} to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.c.inc | 90 +++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 43 deletions(-)
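
A brief sketch of why gathering the two registers into one HostAddress
value helps (simplified stand-ins; the real struct appears verbatim in
the diff below): every emitter composes the same RA/RB operand pair
from it instead of separately tracking rbase and addrlo. The field
positions follow the RA/RB encoding used by the PPC backend, but the
function name here is illustrative.

    #include <stdint.h>

    /* Stand-in for QEMU's TCGReg enum. */
    typedef int TCGReg;

    typedef struct {
        TCGReg base;    /* 0 when no base register participates */
        TCGReg index;
    } HostAddress;

    /* Compose the X-form RA/RB operand bits from one HostAddress. */
    static uint32_t encode_mem_operands(HostAddress h)
    {
        return ((uint32_t)h.base << 16) | ((uint32_t)h.index << 11);
    }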
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
12
--- a/tcg/ppc/tcg-target.c.inc
14
+++ b/accel/tcg/translate-all.c
13
+++ b/tcg/ppc/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
16
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
15
{
16
return tcg_out_fail_alignment(s, l);
17
}
18
-
19
#endif /* SOFTMMU */
20
21
+typedef struct {
22
+ TCGReg base;
23
+ TCGReg index;
24
+} HostAddress;
25
+
26
static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
27
TCGReg addrlo, TCGReg addrhi,
28
MemOpIdx oi, TCGType data_type)
29
{
30
MemOp opc = get_memop(oi);
31
MemOp s_bits = opc & MO_SIZE;
32
- TCGReg rbase;
33
+ HostAddress h;
34
35
#ifdef CONFIG_SOFTMMU
36
tcg_insn_unit *label_ptr;
37
38
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
39
+ h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), true);
40
+ h.base = TCG_REG_R3;
41
42
/* Load a pointer into the current opcode w/conditional branch-link. */
43
label_ptr = s->code_ptr;
44
tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
45
-
46
- rbase = TCG_REG_R3;
47
#else /* !CONFIG_SOFTMMU */
48
unsigned a_bits = get_alignment_bits(opc);
49
if (a_bits) {
50
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
51
}
52
- rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
53
+ h.base = guest_base ? TCG_GUEST_BASE_REG : 0;
54
+ h.index = addrlo;
55
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
56
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
57
- addrlo = TCG_REG_TMP1;
58
+ h.index = TCG_REG_TMP1;
59
}
17
#endif
60
#endif
18
61
19
-#define SMC_BITMAP_USE_THRESHOLD 10
62
if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
20
-
63
if (opc & MO_BSWAP) {
21
typedef struct PageDesc {
64
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
22
/* list of TBs intersecting this ram page */
65
- tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
23
uintptr_t first_tb;
66
- tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
24
-#ifdef CONFIG_SOFTMMU
67
- } else if (rbase != 0) {
25
- /* in order to optimize self modifying code, we count the number
68
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
26
- of lookups we do to a given page to use a bitmap */
69
- tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
27
- unsigned long *code_bitmap;
70
- tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
28
- unsigned int code_write_count;
71
- } else if (addrlo == datahi) {
29
-#else
72
- tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
30
+#ifdef CONFIG_USER_ONLY
73
- tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
31
unsigned long flags;
74
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
32
void *target_data;
75
+ tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
33
#endif
76
+ tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
34
-#ifndef CONFIG_USER_ONLY
77
+ } else if (h.base != 0) {
35
+#ifdef CONFIG_SOFTMMU
78
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
36
QemuSpin lock;
79
+ tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
37
#endif
80
+ tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
38
} PageDesc;
81
+ } else if (h.index == datahi) {
39
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void)
82
+ tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
40
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
83
+ tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
41
}
84
} else {
42
85
- tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
43
-/* call with @p->lock held */
86
- tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
44
-static inline void invalidate_page_bitmap(PageDesc *p)
87
+ tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
45
-{
88
+ tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
46
- assert_page_locked(p);
47
-#ifdef CONFIG_SOFTMMU
48
- g_free(p->code_bitmap);
49
- p->code_bitmap = NULL;
50
- p->code_write_count = 0;
51
-#endif
52
-}
53
-
54
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
55
static void page_flush_tb_1(int level, void **lp)
56
{
57
@@ -XXX,XX +XXX,XX @@ static void page_flush_tb_1(int level, void **lp)
58
for (i = 0; i < V_L2_SIZE; ++i) {
59
page_lock(&pd[i]);
60
pd[i].first_tb = (uintptr_t)NULL;
61
- invalidate_page_bitmap(pd + i);
62
page_unlock(&pd[i]);
63
}
89
}
64
} else {
90
} else {
65
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
91
uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
66
if (rm_from_page_list) {
92
if (!have_isa_2_06 && insn == LDBRX) {
67
p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
93
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
68
tb_page_remove(p, tb);
94
- tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
69
- invalidate_page_bitmap(p);
95
- tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
70
if (tb->page_addr[1] != -1) {
96
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
71
p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
97
+ tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
72
tb_page_remove(p, tb);
98
+ tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
73
- invalidate_page_bitmap(p);
99
tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
100
} else if (insn) {
101
- tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
102
+ tcg_out32(s, insn | TAB(datalo, h.base, h.index));
103
} else {
104
insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
105
- tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
106
+ tcg_out32(s, insn | TAB(datalo, h.base, h.index));
107
tcg_out_movext(s, TCG_TYPE_REG, datalo,
108
TCG_TYPE_REG, opc & MO_SSIZE, datalo);
109
}
110
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
111
{
112
MemOp opc = get_memop(oi);
113
MemOp s_bits = opc & MO_SIZE;
114
- TCGReg rbase;
115
+ HostAddress h;
116
117
#ifdef CONFIG_SOFTMMU
118
tcg_insn_unit *label_ptr;
119
120
- addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
121
+ h.index = tcg_out_tlb_read(s, opc, addrlo, addrhi, get_mmuidx(oi), false);
122
+ h.base = TCG_REG_R3;
123
124
/* Load a pointer into the current opcode w/conditional branch-link. */
125
label_ptr = s->code_ptr;
126
tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
127
-
128
- rbase = TCG_REG_R3;
129
#else /* !CONFIG_SOFTMMU */
130
unsigned a_bits = get_alignment_bits(opc);
131
if (a_bits) {
132
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
133
}
134
- rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
135
+ h.base = guest_base ? TCG_GUEST_BASE_REG : 0;
136
+ h.index = addrlo;
137
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
138
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
139
- addrlo = TCG_REG_TMP1;
140
+ h.index = TCG_REG_TMP1;
141
}
142
#endif
143
144
if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
145
if (opc & MO_BSWAP) {
146
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
147
- tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
148
- tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
149
- } else if (rbase != 0) {
150
- tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
151
- tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
152
- tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
153
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
154
+ tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
155
+ tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
156
+ } else if (h.base != 0) {
157
+ tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
158
+ tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
159
+ tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
160
} else {
161
- tcg_out32(s, STW | TAI(datahi, addrlo, 0));
162
- tcg_out32(s, STW | TAI(datalo, addrlo, 4));
163
+ tcg_out32(s, STW | TAI(datahi, h.index, 0));
164
+ tcg_out32(s, STW | TAI(datalo, h.index, 4));
165
}
166
} else {
167
uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
168
if (!have_isa_2_06 && insn == STDBRX) {
169
- tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
170
- tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
171
+ tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
172
+ tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
173
tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
174
- tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
175
+ tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
176
} else {
177
- tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
178
+ tcg_out32(s, insn | SAB(datalo, h.base, h.index));
74
}
179
}
75
}
180
}
76
181
77
@@ -XXX,XX +XXX,XX @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
78
}
79
}
80
81
-#ifdef CONFIG_SOFTMMU
82
-/* call with @p->lock held */
83
-static void build_page_bitmap(PageDesc *p)
84
-{
85
- int n, tb_start, tb_end;
86
- TranslationBlock *tb;
87
-
88
- assert_page_locked(p);
89
- p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
90
-
91
- PAGE_FOR_EACH_TB(p, tb, n) {
92
- /* NOTE: this is subtle as a TB may span two physical pages */
93
- if (n == 0) {
94
- /* NOTE: tb_end may be after the end of the page, but
95
- it is not a problem */
96
- tb_start = tb->pc & ~TARGET_PAGE_MASK;
97
- tb_end = tb_start + tb->size;
98
- if (tb_end > TARGET_PAGE_SIZE) {
99
- tb_end = TARGET_PAGE_SIZE;
100
- }
101
- } else {
102
- tb_start = 0;
103
- tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
104
- }
105
- bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
106
- }
107
-}
108
-#endif
109
-
110
/* add the tb in the target page and protect it if necessary
111
*
112
* Called with mmap_lock held for user-mode emulation.
113
@@ -XXX,XX +XXX,XX @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
114
page_already_protected = p->first_tb != (uintptr_t)NULL;
115
#endif
116
p->first_tb = (uintptr_t)tb | n;
117
- invalidate_page_bitmap(p);
118
119
#if defined(CONFIG_USER_ONLY)
120
/* translator_loop() must have made all TB pages non-writable */
121
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
122
/* remove TB from the page(s) if we couldn't insert it */
123
if (unlikely(existing_tb)) {
124
tb_page_remove(p, tb);
125
- invalidate_page_bitmap(p);
126
if (p2) {
127
tb_page_remove(p2, tb);
128
- invalidate_page_bitmap(p2);
129
}
130
tb = existing_tb;
131
}
132
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
133
#if !defined(CONFIG_USER_ONLY)
134
/* if no code remaining, no need to continue to use slow writes */
135
if (!p->first_tb) {
136
- invalidate_page_bitmap(p);
137
tlb_unprotect_code(start);
138
}
139
#endif
140
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
141
}
142
143
assert_page_locked(p);
144
- if (!p->code_bitmap &&
145
- ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
146
- build_page_bitmap(p);
147
- }
148
- if (p->code_bitmap) {
149
- unsigned int nr;
150
- unsigned long b;
151
-
152
- nr = start & ~TARGET_PAGE_MASK;
153
- b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
154
- if (b & ((1 << len) - 1)) {
155
- goto do_invalidate;
156
- }
157
- } else {
158
- do_invalidate:
159
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
160
- retaddr);
161
- }
162
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
163
+ retaddr);
164
}
165
#else
166
/* Called with mmap_lock held. If pc is not 0 then it indicates the
167
--
2.34.1

Wrap the bare TranslationBlock pointer into a structure.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-hash.h | 1 +
 accel/tcg/tb-jmp-cache.h | 24 ++++++++++++++++++++++++
 include/exec/cpu-common.h | 1 +
 include/hw/core/cpu.h | 15 +--------------
 include/qemu/typedefs.h | 1 +
 accel/stubs/tcg-stub.c | 4 ++++
 accel/tcg/cpu-exec.c | 10 +++++++---
 accel/tcg/cputlb.c | 9 +++++----
 accel/tcg/translate-all.c | 28 +++++++++++++++++++++++++---
 hw/core/cpu-common.c | 3 +--
 plugins/core.c | 2 +-
 trace/control-target.c | 2 +-
 12 files changed, 72 insertions(+), 28 deletions(-)
 create mode 100644 accel/tcg/tb-jmp-cache.h

The port currently does not support "oversize" guests, which
means riscv32 can only target 32-bit guests. We will soon be
building TCG once for all guests. This implies that we can
only support riscv64.

Since all Linux distributions target riscv64 not riscv32,
this is not much of a restriction and simplifies the code.

The brcond2 and setcond2 opcodes are exclusive to 32-bit hosts,
so we can and should remove the stubs.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target-con-set.h | 8 --
 tcg/riscv/tcg-target.h | 22 ++--
 tcg/riscv/tcg-target.c.inc | 232 +++++++++------------------
 3 files changed, 72 insertions(+), 190 deletions(-)
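
Two short illustrations for the messages above. First, with the
backend now riscv64-only, sextreg() can simply alias sextract64(); a
standalone sketch of that extraction, modeled on QEMU's bitops helper
(assuming 0 <= start, 0 < length, and start + length <= 64):

    #include <stdint.h>

    static inline int64_t sextract64_sketch(uint64_t value, int start,
                                            int length)
    {
        /* Move the field's top bit up to bit 63, then shift back down. */
        return (int64_t)(value << (64 - length - start)) >> (64 - length);
    }

Second, for the jump-cache wrap: once the cache entry is a struct, a
lookup is still one atomic load, but the entry can later grow extra
fields without touching every user. A hedged sketch of the access
pattern, using C11 atomics as stand-ins for QEMU's qatomic helpers:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct TranslationBlock TranslationBlock;

    #define TB_JMP_CACHE_BITS 12
    #define TB_JMP_CACHE_SIZE (1u << TB_JMP_CACHE_BITS)

    struct CPUJumpCache {
        struct {
            TranslationBlock *_Atomic tb;   /* accessed in parallel */
        } array[TB_JMP_CACHE_SIZE];
    };

    static TranslationBlock *jmp_cache_lookup(struct CPUJumpCache *jc,
                                              uint64_t hash)
    {
        /* A miss returns NULL and falls through to the slow lookup. */
        return atomic_load_explicit(
            &jc->array[hash & (TB_JMP_CACHE_SIZE - 1)].tb,
            memory_order_relaxed);
    }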
20
22
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
21
diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
23
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
24
--- a/accel/tcg/tb-hash.h
23
--- a/tcg/riscv/tcg-target-con-set.h
25
+++ b/accel/tcg/tb-hash.h
24
+++ b/tcg/riscv/tcg-target-con-set.h
25
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
26
C_O0_I2(LZ, L)
27
C_O0_I2(rZ, r)
28
C_O0_I2(rZ, rZ)
29
-C_O0_I3(LZ, L, L)
30
-C_O0_I3(LZ, LZ, L)
31
-C_O0_I4(LZ, LZ, L, L)
32
-C_O0_I4(rZ, rZ, rZ, rZ)
33
C_O1_I1(r, L)
34
C_O1_I1(r, r)
35
-C_O1_I2(r, L, L)
36
C_O1_I2(r, r, ri)
37
C_O1_I2(r, r, rI)
38
C_O1_I2(r, rZ, rN)
39
C_O1_I2(r, rZ, rZ)
40
-C_O1_I4(r, rZ, rZ, rZ, rZ)
41
-C_O2_I1(r, r, L)
42
-C_O2_I2(r, r, L, L)
43
C_O2_I4(r, r, rZ, rZ, rM, rM)
44
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/riscv/tcg-target.h
47
+++ b/tcg/riscv/tcg-target.h
26
@@ -XXX,XX +XXX,XX @@
48
@@ -XXX,XX +XXX,XX @@
27
#include "exec/cpu-defs.h"
49
#ifndef RISCV_TCG_TARGET_H
28
#include "exec/exec-all.h"
50
#define RISCV_TCG_TARGET_H
29
#include "qemu/xxhash.h"
51
30
+#include "tb-jmp-cache.h"
52
-#if __riscv_xlen == 32
31
53
-# define TCG_TARGET_REG_BITS 32
32
#ifdef CONFIG_SOFTMMU
54
-#elif __riscv_xlen == 64
33
55
-# define TCG_TARGET_REG_BITS 64
34
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
35
new file mode 100644
36
index XXXXXXX..XXXXXXX
37
--- /dev/null
38
+++ b/accel/tcg/tb-jmp-cache.h
39
@@ -XXX,XX +XXX,XX @@
40
+/*
56
+/*
41
+ * The per-CPU TranslationBlock jump cache.
57
+ * We don't support oversize guests.
42
+ *
58
+ * Since we will only build tcg once, this in turn requires a 64-bit host.
43
+ * Copyright (c) 2003 Fabrice Bellard
44
+ *
45
+ * SPDX-License-Identifier: GPL-2.0-or-later
46
+ */
59
+ */
47
+
60
+#if __riscv_xlen != 64
48
+#ifndef ACCEL_TCG_TB_JMP_CACHE_H
61
+#error "unsupported code generation mode"
49
+#define ACCEL_TCG_TB_JMP_CACHE_H
62
#endif
50
+
63
+#define TCG_TARGET_REG_BITS 64
51
+#define TB_JMP_CACHE_BITS 12
64
52
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
65
#define TCG_TARGET_INSN_UNIT_SIZE 4
53
+
66
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 20
54
+/*
67
@@ -XXX,XX +XXX,XX @@ typedef enum {
55
+ * Accessed in parallel; all accesses to 'tb' must be atomic.
68
#define TCG_TARGET_STACK_ALIGN 16
56
+ */
69
#define TCG_TARGET_CALL_STACK_OFFSET 0
57
+struct CPUJumpCache {
70
#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
58
+ struct {
71
-#if TCG_TARGET_REG_BITS == 32
59
+ TranslationBlock *tb;
72
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
60
+ } array[TB_JMP_CACHE_SIZE];
73
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
61
+};
74
-#else
62
+
75
#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
63
+#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
76
#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
64
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
77
-#endif
78
#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
79
80
/* optional instructions */
81
@@ -XXX,XX +XXX,XX @@ typedef enum {
82
#define TCG_TARGET_HAS_sub2_i32 1
83
#define TCG_TARGET_HAS_mulu2_i32 0
84
#define TCG_TARGET_HAS_muls2_i32 0
85
-#define TCG_TARGET_HAS_muluh_i32 (TCG_TARGET_REG_BITS == 32)
86
-#define TCG_TARGET_HAS_mulsh_i32 (TCG_TARGET_REG_BITS == 32)
87
+#define TCG_TARGET_HAS_muluh_i32 0
88
+#define TCG_TARGET_HAS_mulsh_i32 0
89
#define TCG_TARGET_HAS_ext8s_i32 1
90
#define TCG_TARGET_HAS_ext16s_i32 1
91
#define TCG_TARGET_HAS_ext8u_i32 1
92
@@ -XXX,XX +XXX,XX @@ typedef enum {
93
#define TCG_TARGET_HAS_setcond2 1
94
#define TCG_TARGET_HAS_qemu_st8_i32 0
95
96
-#if TCG_TARGET_REG_BITS == 64
97
#define TCG_TARGET_HAS_movcond_i64 0
98
#define TCG_TARGET_HAS_div_i64 1
99
#define TCG_TARGET_HAS_rem_i64 1
100
@@ -XXX,XX +XXX,XX @@ typedef enum {
101
#define TCG_TARGET_HAS_muls2_i64 0
102
#define TCG_TARGET_HAS_muluh_i64 1
103
#define TCG_TARGET_HAS_mulsh_i64 1
104
-#endif
105
106
#define TCG_TARGET_DEFAULT_MO (0)
107
108
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
65
index XXXXXXX..XXXXXXX 100644
109
index XXXXXXX..XXXXXXX 100644
66
--- a/include/exec/cpu-common.h
110
--- a/tcg/riscv/tcg-target.c.inc
67
+++ b/include/exec/cpu-common.h
111
+++ b/tcg/riscv/tcg-target.c.inc
68
@@ -XXX,XX +XXX,XX @@ void cpu_list_unlock(void);
112
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
69
unsigned int cpu_list_generation_id_get(void);
113
#define SOFTMMU_RESERVE_REGS 0
70
114
#endif
71
void tcg_flush_softmmu_tlb(CPUState *cs);
115
72
+void tcg_flush_jmp_cache(CPUState *cs);
116
-
73
117
-static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
74
void tcg_iommu_init_notifier_list(CPUState *cpu);
75
void tcg_iommu_free_notifier_list(CPUState *cpu);
76
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
77
index XXXXXXX..XXXXXXX 100644
78
--- a/include/hw/core/cpu.h
79
+++ b/include/hw/core/cpu.h
80
@@ -XXX,XX +XXX,XX @@ struct kvm_run;
81
struct hax_vcpu_state;
82
struct hvf_vcpu_state;
83
84
-#define TB_JMP_CACHE_BITS 12
85
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
86
-
87
/* work queue */
88
89
/* The union type allows passing of 64 bit target pointers on 32 bit
90
@@ -XXX,XX +XXX,XX @@ struct CPUState {
91
CPUArchState *env_ptr;
92
IcountDecr *icount_decr_ptr;
93
94
- /* Accessed in parallel; all accesses must be atomic */
95
- TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
96
+ CPUJumpCache *tb_jmp_cache;
97
98
struct GDBRegisterState *gdb_regs;
99
int gdb_num_regs;
100
@@ -XXX,XX +XXX,XX @@ extern CPUTailQ cpus;
101
102
extern __thread CPUState *current_cpu;
103
104
-static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
105
-{
118
-{
106
- unsigned int i;
119
- if (TCG_TARGET_REG_BITS == 32) {
107
-
120
- return sextract32(val, pos, len);
108
- for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
121
- } else {
109
- qatomic_set(&cpu->tb_jmp_cache[i], NULL);
122
- return sextract64(val, pos, len);
110
- }
123
- }
111
-}
124
-}
112
-
125
+#define sextreg sextract64
113
/**
126
114
* qemu_tcg_mttcg_enabled:
127
/* test if a constant matches the constraint */
115
* Check whether we are running MultiThread TCG or not.
128
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
116
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
129
@@ -XXX,XX +XXX,XX @@ typedef enum {
117
index XXXXXXX..XXXXXXX 100644
130
OPC_XOR = 0x4033,
118
--- a/include/qemu/typedefs.h
131
OPC_XORI = 0x4013,
119
+++ b/include/qemu/typedefs.h
132
120
@@ -XXX,XX +XXX,XX @@ typedef struct CoMutex CoMutex;
133
-#if TCG_TARGET_REG_BITS == 64
121
typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
134
OPC_ADDIW = 0x1b,
122
typedef struct CPUAddressSpace CPUAddressSpace;
135
OPC_ADDW = 0x3b,
123
typedef struct CPUArchState CPUArchState;
136
OPC_DIVUW = 0x200503b,
124
+typedef struct CPUJumpCache CPUJumpCache;
137
@@ -XXX,XX +XXX,XX @@ typedef enum {
125
typedef struct CPUState CPUState;
138
OPC_SRLIW = 0x501b,
126
typedef struct CPUTLBEntryFull CPUTLBEntryFull;
139
OPC_SRLW = 0x503b,
127
typedef struct DeviceListener DeviceListener;
140
OPC_SUBW = 0x4000003b,
128
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
141
-#else
129
index XXXXXXX..XXXXXXX 100644
142
- /* Simplify code throughout by defining aliases for RV32. */
130
--- a/accel/stubs/tcg-stub.c
143
- OPC_ADDIW = OPC_ADDI,
131
+++ b/accel/stubs/tcg-stub.c
144
- OPC_ADDW = OPC_ADD,
132
@@ -XXX,XX +XXX,XX @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
145
- OPC_DIVUW = OPC_DIVU,
133
{
146
- OPC_DIVW = OPC_DIV,
134
}
147
- OPC_MULW = OPC_MUL,
135
148
- OPC_REMUW = OPC_REMU,
136
+void tcg_flush_jmp_cache(CPUState *cpu)
149
- OPC_REMW = OPC_REM,
137
+{
150
- OPC_SLLIW = OPC_SLLI,
138
+}
151
- OPC_SLLW = OPC_SLL,
139
+
152
- OPC_SRAIW = OPC_SRAI,
140
int probe_access_flags(CPUArchState *env, target_ulong addr,
153
- OPC_SRAW = OPC_SRA,
141
MMUAccessType access_type, int mmu_idx,
154
- OPC_SRLIW = OPC_SRLI,
142
bool nonfault, void **phost, uintptr_t retaddr)
155
- OPC_SRLW = OPC_SRL,
143
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
156
- OPC_SUBW = OPC_SUB,
144
index XXXXXXX..XXXXXXX 100644
157
-#endif
145
--- a/accel/tcg/cpu-exec.c
158
146
+++ b/accel/tcg/cpu-exec.c
159
OPC_FENCE = 0x0000000f,
147
@@ -XXX,XX +XXX,XX @@
160
OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
148
#include "sysemu/replay.h"
161
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
149
#include "sysemu/tcg.h"
162
tcg_target_long lo, hi, tmp;
150
#include "exec/helper-proto.h"
163
int shift, ret;
151
+#include "tb-jmp-cache.h"
164
152
#include "tb-hash.h"
165
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
153
#include "tb-context.h"
166
+ if (type == TCG_TYPE_I32) {
154
#include "internal.h"
167
val = (int32_t)val;
155
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
168
}
156
tcg_debug_assert(!(cflags & CF_INVALID));
169
157
170
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
158
hash = tb_jmp_cache_hash_func(pc);
171
}
159
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
172
160
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
173
hi = val - lo;
161
174
- if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
162
if (likely(tb &&
175
+ if (val == (int32_t)val) {
163
tb->pc == pc &&
176
tcg_out_opc_upper(s, OPC_LUI, rd, hi);
164
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
177
if (lo != 0) {
165
if (tb == NULL) {
178
tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
166
return NULL;
179
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
167
}
168
- qatomic_set(&cpu->tb_jmp_cache[hash], tb);
169
+ qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
170
return tb;
171
}
172
173
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
174
175
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
176
if (tb == NULL) {
177
+ uint32_t h;
178
+
179
mmap_lock();
180
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
181
mmap_unlock();
182
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
183
* We add the TB in the virtual pc hash table
184
* for the fast lookup
185
*/
186
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
187
+ h = tb_jmp_cache_hash_func(pc);
188
+ qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
189
}
190
191
#ifndef CONFIG_USER_ONLY
192
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
193
index XXXXXXX..XXXXXXX 100644
194
--- a/accel/tcg/cputlb.c
195
+++ b/accel/tcg/cputlb.c
196
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
197
198
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
199
{
200
- unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
201
+ int i, i0 = tb_jmp_cache_hash_page(page_addr);
202
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
203
204
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
205
- qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
206
+ qatomic_set(&jc->array[i0 + i].tb, NULL);
207
}
208
}
209
210
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
211
212
qemu_spin_unlock(&env_tlb(env)->c.lock);
213
214
- cpu_tb_jmp_cache_clear(cpu);
215
+ tcg_flush_jmp_cache(cpu);
216
217
if (to_clean == ALL_MMUIDX_BITS) {
218
qatomic_set(&env_tlb(env)->c.full_flush_count,
219
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
220
* longer to clear each entry individually than it will to clear it all.
221
*/
222
if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
223
- cpu_tb_jmp_cache_clear(cpu);
224
+ tcg_flush_jmp_cache(cpu);
225
return;
180
return;
226
}
181
}
227
182
228
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
183
- /* We can only be here if TCG_TARGET_REG_BITS != 32 */
229
index XXXXXXX..XXXXXXX 100644
184
tmp = tcg_pcrel_diff(s, (void *)val);
230
--- a/accel/tcg/translate-all.c
185
if (tmp == (int32_t)tmp) {
231
+++ b/accel/tcg/translate-all.c
186
tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
232
@@ -XXX,XX +XXX,XX @@
187
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
233
#include "sysemu/tcg.h"
188
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
234
#include "qapi/error.h"
189
TCGReg arg1, intptr_t arg2)
235
#include "hw/core/tcg-cpu-ops.h"
190
{
236
+#include "tb-jmp-cache.h"
191
- bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
237
#include "tb-hash.h"
192
- tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
238
#include "tb-context.h"
193
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
239
#include "internal.h"
194
+ tcg_out_ldst(s, insn, arg, arg1, arg2);
240
@@ -XXX,XX +XXX,XX @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
195
}
241
}
196
242
197
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
243
CPU_FOREACH(cpu) {
198
TCGReg arg1, intptr_t arg2)
244
- cpu_tb_jmp_cache_clear(cpu);
199
{
245
+ tcg_flush_jmp_cache(cpu);
200
- bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
246
}
201
- tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
247
202
+ RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
248
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
203
+ tcg_out_ldst(s, insn, arg, arg1, arg2);
249
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
204
}
250
/* remove the TB from the hash list */
205
251
h = tb_jmp_cache_hash_func(tb->pc);
206
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
252
CPU_FOREACH(cpu) {
207
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
253
- if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
208
}
254
- qatomic_set(&cpu->tb_jmp_cache[h], NULL);
209
}
255
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
210
256
+ if (qatomic_read(&jc->array[h].tb) == tb) {
211
-static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
257
+ qatomic_set(&jc->array[h].tb, NULL);
212
- TCGReg bl, TCGReg bh, TCGLabel *l)
213
-{
214
- /* todo */
215
- g_assert_not_reached();
216
-}
217
-
218
-static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
219
- TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
220
-{
221
- /* todo */
222
- g_assert_not_reached();
223
-}
224
-
225
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
226
{
227
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
228
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
229
if (offset == sextreg(offset, 0, 20)) {
230
/* short jump: -2097150 to 2097152 */
231
tcg_out_opc_jump(s, OPC_JAL, link, offset);
232
- } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
233
+ } else if (offset == (int32_t)offset) {
234
/* long jump: -2147483646 to 2147483648 */
235
tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
236
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
237
ret = reloc_call(s->code_ptr - 2, arg);
238
tcg_debug_assert(ret == true);
239
- } else if (TCG_TARGET_REG_BITS == 64) {
240
+ } else {
241
/* far jump: 64-bit */
242
tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
243
tcg_target_long base = (tcg_target_long)arg - imm;
244
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
245
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
246
- } else {
247
- g_assert_not_reached();
248
}
249
}
250
251
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
252
#endif
253
};
254
255
-/* We don't support oversize guests */
256
-QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
257
-
258
/* We expect to use a 12-bit negative offset from ENV. */
259
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
260
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
261
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
262
tcg_debug_assert(ok);
263
}
264
265
-static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
266
- TCGReg addrh, MemOpIdx oi,
267
+static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi,
268
tcg_insn_unit **label_ptr, bool is_load)
269
{
270
MemOp opc = get_memop(oi);
271
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
272
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
273
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
274
275
- tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
276
+ tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr,
277
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
278
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
279
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
280
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
281
/* Clear the non-page, non-alignment bits from the address. */
282
compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
283
if (compare_mask == sextreg(compare_mask, 0, 12)) {
284
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
285
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask);
286
} else {
287
tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
288
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
289
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr);
290
}
291
292
/* Compare masked address with the TLB entry. */
293
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
294
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
295
296
/* TLB Hit - translate address using addend. */
297
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
298
- tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
299
- addrl = TCG_REG_TMP0;
300
+ if (TARGET_LONG_BITS == 32) {
301
+ tcg_out_ext32u(s, TCG_REG_TMP0, addr);
302
+ addr = TCG_REG_TMP0;
303
}
304
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
305
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr);
306
return TCG_REG_TMP0;
307
}
308
309
static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
310
- TCGType ext,
311
- TCGReg datalo, TCGReg datahi,
312
- TCGReg addrlo, TCGReg addrhi,
313
-                                void *raddr, tcg_insn_unit **label_ptr)
+                                TCGType data_type, TCGReg data_reg,
+                                TCGReg addr_reg, void *raddr,
+                                tcg_insn_unit **label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
     label->oi = oi;
-    label->type = ext;
-    label->datalo_reg = datalo;
-    label->datahi_reg = datahi;
-    label->addrlo_reg = addrlo;
-    label->addrhi_reg = addrhi;
+    label->type = data_type;
+    label->datalo_reg = data_reg;
+    label->addrlo_reg = addr_reg;
     label->raddr = tcg_splitwx_to_rx(raddr);
     label->label_ptr[0] = label_ptr[0];
 }
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     TCGReg a2 = tcg_target_call_iarg_regs[2];
     TCGReg a3 = tcg_target_call_iarg_regs[3];
 
-    /* We don't support oversize guests */
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        g_assert_not_reached();
-    }
-
     /* resolve label address */
     if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     TCGReg a3 = tcg_target_call_iarg_regs[3];
     TCGReg a4 = tcg_target_call_iarg_regs[4];
 
-    /* We don't support oversize guests */
-    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
-        g_assert_not_reached();
-    }
-
     /* resolve label address */
     if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 
 #endif /* CONFIG_SOFTMMU */
 
-static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
                                    TCGReg base, MemOp opc, bool is_64)
 {
     /* Byte swapping is left to middle-end expansion. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 
     switch (opc & (MO_SSIZE)) {
     case MO_UB:
-        tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LBU, val, base, 0);
         break;
     case MO_SB:
-        tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LB, val, base, 0);
         break;
     case MO_UW:
-        tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LHU, val, base, 0);
         break;
     case MO_SW:
-        tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LH, val, base, 0);
         break;
     case MO_UL:
-        if (TCG_TARGET_REG_BITS == 64 && is_64) {
-            tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
+        if (is_64) {
+            tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
             break;
         }
         /* FALLTHRU */
     case MO_SL:
-        tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
+        tcg_out_opc_imm(s, OPC_LW, val, base, 0);
         break;
     case MO_UQ:
-        /* Prefer to load from offset 0 first, but allow for overlap.  */
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
-        } else if (lo != base) {
-            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
-            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
-        } else {
-            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
-            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
-        }
+        tcg_out_opc_imm(s, OPC_LD, val, base, 0);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 
 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 {
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
+    TCGReg addr_reg, data_reg;
     MemOpIdx oi;
     MemOp opc;
 #if defined(CONFIG_SOFTMMU)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 #endif
     TCGReg base;
 
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
+    data_reg = *args++;
+    addr_reg = *args++;
     oi = *args++;
     opc = get_memop(oi);
 
 #if defined(CONFIG_SOFTMMU)
-    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
-    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
-    add_qemu_ldst_label(s, 1, oi,
-                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_regl, data_regh, addr_regl, addr_regh,
-                        s->code_ptr, label_ptr);
+    base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
+    tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
+    add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+                        data_reg, addr_reg, s->code_ptr, label_ptr);
 #else
     a_bits = get_alignment_bits(opc);
     if (a_bits) {
-        tcg_out_test_alignment(s, true, addr_regl, a_bits);
+        tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    base = addr_regl;
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+    base = addr_reg;
+    if (TARGET_LONG_BITS == 32) {
         tcg_out_ext32u(s, TCG_REG_TMP0, base);
         base = TCG_REG_TMP0;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
         tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
         base = TCG_REG_TMP0;
     }
-    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+    tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
 #endif
 }
 
-static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
                                    TCGReg base, MemOp opc)
 {
     /* Byte swapping is left to middle-end expansion. */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 
     switch (opc & (MO_SSIZE)) {
     case MO_8:
-        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
+        tcg_out_opc_store(s, OPC_SB, base, val, 0);
         break;
     case MO_16:
-        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
+        tcg_out_opc_store(s, OPC_SH, base, val, 0);
         break;
     case MO_32:
-        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
+        tcg_out_opc_store(s, OPC_SW, base, val, 0);
         break;
     case MO_64:
-        if (TCG_TARGET_REG_BITS == 64) {
-            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
-        } else {
-            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
-            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
-        }
+        tcg_out_opc_store(s, OPC_SD, base, val, 0);
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
 
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
-    TCGReg addr_regl, addr_regh __attribute__((unused));
-    TCGReg data_regl, data_regh;
+    TCGReg addr_reg, data_reg;
     MemOpIdx oi;
     MemOp opc;
 #if defined(CONFIG_SOFTMMU)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 #endif
     TCGReg base;
 
-    data_regl = *args++;
-    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
-    addr_regl = *args++;
-    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
+    data_reg = *args++;
+    addr_reg = *args++;
     oi = *args++;
     opc = get_memop(oi);
 
 #if defined(CONFIG_SOFTMMU)
-    base = tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
-    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
-    add_qemu_ldst_label(s, 0, oi,
-                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_regl, data_regh, addr_regl, addr_regh,
-                        s->code_ptr, label_ptr);
+    base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
+    tcg_out_qemu_st_direct(s, data_reg, base, opc);
+    add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+                        data_reg, addr_reg, s->code_ptr, label_ptr);
 #else
     a_bits = get_alignment_bits(opc);
     if (a_bits) {
-        tcg_out_test_alignment(s, false, addr_regl, a_bits);
+        tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
-    base = addr_regl;
-    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+    base = addr_reg;
+    if (TARGET_LONG_BITS == 32) {
         tcg_out_ext32u(s, TCG_REG_TMP0, base);
         base = TCG_REG_TMP0;
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
         tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
         base = TCG_REG_TMP0;
     }
-    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+    tcg_out_qemu_st_direct(s, data_reg, base, opc);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_brcond_i64:
         tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
         break;
-    case INDEX_op_brcond2_i32:
-        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
-        break;
 
     case INDEX_op_setcond_i32:
     case INDEX_op_setcond_i64:
         tcg_out_setcond(s, args[3], a0, a1, a2);
         break;
-    case INDEX_op_setcond2_i32:
-        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
-        break;
 
     case INDEX_op_qemu_ld_i32:
         tcg_out_qemu_ld(s, args, false);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_sub2_i64:
         return C_O2_I4(r, r, rZ, rZ, rM, rM);
 
-    case INDEX_op_brcond2_i32:
-        return C_O0_I4(rZ, rZ, rZ, rZ);
-
-    case INDEX_op_setcond2_i32:
-        return C_O1_I4(r, rZ, rZ, rZ, rZ);
-
     case INDEX_op_qemu_ld_i32:
-        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-                ? C_O1_I1(r, L) : C_O1_I2(r, L, L));
-    case INDEX_op_qemu_st_i32:
-        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
-                ? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
     case INDEX_op_qemu_ld_i64:
-        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
-                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
-                : C_O2_I2(r, r, L, L));
+        return C_O1_I1(r, L);
+    case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
-                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
-                : C_O0_I4(LZ, LZ, L, L));
+        return C_O0_I2(LZ, L);
 
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
 static void tcg_target_init(TCGContext *s)
 {
     tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
-    if (TCG_TARGET_REG_BITS == 64) {
-        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
-    }
+    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
 
     tcg_target_call_clobber_regs = -1u;
     tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
--
2.34.1

         }
     }
 
@@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc)
 }
 #endif /* CONFIG_USER_ONLY */
 
+/*
+ * Called by generic code at e.g. cpu reset after cpu creation,
+ * therefore we must be prepared to allocate the jump cache.
+ */
+void tcg_flush_jmp_cache(CPUState *cpu)
+{
+    CPUJumpCache *jc = cpu->tb_jmp_cache;
+
+    if (likely(jc)) {
+        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+            qatomic_set(&jc->array[i].tb, NULL);
+        }
+    } else {
+        /* This should happen once during realize, and thus never race. */
+        jc = g_new0(CPUJumpCache, 1);
+        jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
+        assert(jc == NULL);
+    }
+}
+
 /* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
 void tcg_flush_softmmu_tlb(CPUState *cs)
 {
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(DeviceState *dev)
     cpu->cflags_next_tb = -1;
 
     if (tcg_enabled()) {
-        cpu_tb_jmp_cache_clear(cpu);
-
+        tcg_flush_jmp_cache(cpu);
         tcg_flush_softmmu_tlb(cpu);
     }
 }
diff --git a/plugins/core.c b/plugins/core.c
index XXXXXXX..XXXXXXX 100644
--- a/plugins/core.c
+++ b/plugins/core.c
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
 static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
 {
     bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
-    cpu_tb_jmp_cache_clear(cpu);
+    tcg_flush_jmp_cache(cpu);
 }
 
 static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
diff --git a/trace/control-target.c b/trace/control-target.c
index XXXXXXX..XXXXXXX 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -XXX,XX +XXX,XX @@ static void trace_event_synchronize_vcpu_state_dynamic(
 {
     bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
                 CPU_TRACE_DSTATE_MAX_EVENTS);
-    cpu_tb_jmp_cache_clear(vcpu);
+    tcg_flush_jmp_cache(vcpu);
 }
 
 void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
--
2.34.1
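A standalone sketch of the allocate-or-clear pattern used by
tcg_flush_jmp_cache above. All names here are invented for
illustration and are not QEMU APIs; the real code uses qatomic_set
and qatomic_xchg rather than C11 atomics directly:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    enum { CACHE_SIZE = 16 };

    typedef struct Cache {
        _Atomic(void *) slot[CACHE_SIZE];
    } Cache;

    /* Clear the cache if it already exists, else allocate it exactly once. */
    static void flush_or_alloc(_Atomic(Cache *) *pcache)
    {
        Cache *c = atomic_load(pcache);

        if (c) {
            for (int i = 0; i < CACHE_SIZE; i++) {
                atomic_store(&c->slot[i], NULL);
            }
        } else {
            /* First call, e.g. during realize; a double init is a bug. */
            c = calloc(1, sizeof(*c));
            Cache *old = atomic_exchange(pcache, c);
            assert(old == NULL);
        }
    }

The exchange-then-assert makes an accidental concurrent first-time
allocation fail loudly instead of silently leaking a cache.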
From: Alex Bennée <alex.bennee@linaro.org>

Before: 35.912 s ±  0.168 s
After: 35.565 s ±  0.087 s

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220811151413.3350684-5-alex.bennee@linaro.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20220923084803.498337-5-clg@kaod.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
 static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     bool ok;
 
     /*
      * This is not a probe, so only valid return is success; failure
      * should result in exception + longjmp to the cpu loop.
      */
-    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
-                               access_type, mmu_idx, false, retaddr);
+    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
+                                    access_type, mmu_idx, false, retaddr);
     assert(ok);
 }
 
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                         MMUAccessType access_type,
                                         int mmu_idx, uintptr_t retaddr)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
-    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+    cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
+                                          mmu_idx, retaddr);
 }
 
 static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
             CPUState *cs = env_cpu(env);
-            CPUClass *cc = CPU_GET_CLASS(cs);
 
-            if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
-                                       mmu_idx, nonfault, retaddr)) {
+            if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+                                           mmu_idx, nonfault, retaddr)) {
                 /* Non-faulting page table read failed. */
                 *phost = NULL;
                 return TLB_INVALID_MASK;
--
2.34.1

Interpret the variable argument placement in the caller. Pass data_type
instead of is64 -- there are several places where we already convert back
from bool to type. Clean things up by using type throughout.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 66 ++++++++++++++------------------------
 1 file changed, 24 insertions(+), 42 deletions(-)

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 #endif /* CONFIG_SOFTMMU */
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
-                                   TCGReg base, MemOp opc, bool is_64)
+                                   TCGReg base, MemOp opc, TCGType type)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((opc & MO_BSWAP) == 0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
         tcg_out_opc_imm(s, OPC_LH, val, base, 0);
         break;
     case MO_UL:
-        if (is_64) {
+        if (type == TCG_TYPE_I64) {
             tcg_out_opc_imm(s, OPC_LWU, val, base, 0);
             break;
         }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
     }
 }
 
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_reg, data_reg;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
+    MemOp opc = get_memop(oi);
     TCGReg base;
 
-    data_reg = *args++;
-    addr_reg = *args++;
-    oi = *args++;
-    opc = get_memop(oi);
-
 #if defined(CONFIG_SOFTMMU)
+    tcg_insn_unit *label_ptr[1];
+
     base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
-    tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
-    add_qemu_ldst_label(s, 1, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_reg, addr_reg, s->code_ptr, label_ptr);
+    tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
         tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
         base = TCG_REG_TMP0;
     }
-    tcg_out_qemu_ld_direct(s, data_reg, base, opc, is_64);
+    tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
     }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+                            MemOpIdx oi, TCGType data_type)
 {
-    TCGReg addr_reg, data_reg;
-    MemOpIdx oi;
-    MemOp opc;
-#if defined(CONFIG_SOFTMMU)
-    tcg_insn_unit *label_ptr[1];
-#else
-    unsigned a_bits;
-#endif
+    MemOp opc = get_memop(oi);
     TCGReg base;
 
-    data_reg = *args++;
-    addr_reg = *args++;
-    oi = *args++;
-    opc = get_memop(oi);
-
 #if defined(CONFIG_SOFTMMU)
+    tcg_insn_unit *label_ptr[1];
+
     base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
     tcg_out_qemu_st_direct(s, data_reg, base, opc);
-    add_qemu_ldst_label(s, 0, oi, (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                        data_reg, addr_reg, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
-    a_bits = get_alignment_bits(opc);
+    unsigned a_bits = get_alignment_bits(opc);
     if (a_bits) {
         tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, args, false);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args, true);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
-        tcg_out_qemu_st(s, args, false);
+        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args, true);
+        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
         break;
 
     case INDEX_op_extrh_i64_i32:
--
2.34.1
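For readers following the data_type change above, a minimal sketch of
the redundancy it deletes. The enum and function names are invented
for illustration:

    #include <stdbool.h>

    typedef enum { TYPE_I32, TYPE_I64 } Type;

    /* Before: the caller collapsed the operation type to a bool, and
     * the callee immediately rebuilt the enum from it. */
    static Type type_from_is64(bool is_64)
    {
        return is_64 ? TYPE_I64 : TYPE_I32;
    }

    /* After: the Type value travels end to end, so neither the
     * collapse nor the reconstruction exists. */
    static Type type_passthrough(Type data_type)
    {
        return data_type;
    }

Carrying the enum also leaves room for more than two data types later,
which a bool cannot express.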
Now that we have collected all of the page data into
CPUTLBEntryFull, provide an interface to record that
all in one go, instead of using 4 arguments. This interface
allows CPUTLBEntryFull to be extended without having to
change the number of arguments.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h | 14 +++++++++++
 include/exec/exec-all.h | 22 ++++++++++++++++++
 accel/tcg/cputlb.c      | 51 ++++++++++++++++++++++++++---------------
 3 files changed, 69 insertions(+), 18 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
      *     + the offset within the target MemoryRegion (otherwise)
      */
     hwaddr xlat_section;
+
+    /*
+     * @phys_addr contains the physical address in the address space
+     * given by cpu_asidx_from_attrs(cpu, @attrs).
+     */
+    hwaddr phys_addr;
+
+    /* @attrs contains the memory transaction attributes for the page. */
     MemTxAttrs attrs;
+
+    /* @prot contains the complete protections for the page. */
+    uint8_t prot;
+
+    /* @lg_page_size contains the log2 of the page size. */
+    uint8_t lg_page_size;
 } CPUTLBEntryFull;
 
 /*
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                uint16_t idxmap,
                                                unsigned bits);
 
+/**
+ * tlb_set_page_full:
+ * @cpu: CPU context
+ * @mmu_idx: mmu index of the tlb to modify
+ * @vaddr: virtual address of the entry to add
+ * @full: the details of the tlb entry
+ *
+ * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
+ * @full must be filled, except for xlat_section, and constitute
+ * the complete description of the translated page.
+ *
+ * This is generally called by the target tlb_fill function after
+ * having performed a successful page table walk to find the physical
+ * address and attributes for the translation.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
+ * used by tlb_flush_page.
+ */
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
+                       CPUTLBEntryFull *full);
+
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
-/* Add a new TLB entry. At most one entry for a given virtual address
+/*
+ * Add a new TLB entry. At most one entry for a given virtual address
  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
  * supplied size is only used by tlb_flush_page.
  *
  * Called from TCG-generated code, which is under an RCU read-side
  * critical section.
  */
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
-                             hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, target_ulong size)
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
+                       target_ulong vaddr, CPUTLBEntryFull *full)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLB *tlb = env_tlb(env);
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
     target_ulong vaddr_page;
-    int asidx = cpu_asidx_from_attrs(cpu, attrs);
-    int wp_flags;
+    int asidx, wp_flags, prot;
     bool is_ram, is_romd;
 
     assert_cpu_is_self(cpu);
 
-    if (size <= TARGET_PAGE_SIZE) {
+    if (full->lg_page_size <= TARGET_PAGE_BITS) {
         sz = TARGET_PAGE_SIZE;
     } else {
-        tlb_add_large_page(env, mmu_idx, vaddr, size);
-        sz = size;
+        sz = (hwaddr)1 << full->lg_page_size;
+        tlb_add_large_page(env, mmu_idx, vaddr, sz);
     }
     vaddr_page = vaddr & TARGET_PAGE_MASK;
-    paddr_page = paddr & TARGET_PAGE_MASK;
+    paddr_page = full->phys_addr & TARGET_PAGE_MASK;
 
+    prot = full->prot;
+    asidx = cpu_asidx_from_attrs(cpu, full->attrs);
     section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
-                                                &xlat, &sz, attrs, &prot);
+                                                &xlat, &sz, full->attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);
 
     tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
               " prot=%x idx=%d\n",
-              vaddr, paddr, prot, mmu_idx);
+              vaddr, full->phys_addr, prot, mmu_idx);
 
     address = vaddr_page;
-    if (size < TARGET_PAGE_SIZE) {
+    if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access. */
         address |= TLB_INVALID_MASK;
     }
-    if (attrs.byte_swap) {
+    if (full->attrs.byte_swap) {
         address |= TLB_BSWAP;
     }
 
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
      * subtract here is that of the page base, and not the same as the
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
+    desc->fulltlb[index] = *full;
     desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
-    desc->fulltlb[index].attrs = attrs;
+    desc->fulltlb[index].phys_addr = paddr_page;
+    desc->fulltlb[index].prot = prot;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     qemu_spin_unlock(&tlb->c.lock);
 }
 
-/* Add a new TLB entry, but without specifying the memory
- * transaction attributes to be used.
- */
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+                             hwaddr paddr, MemTxAttrs attrs, int prot,
+                             int mmu_idx, target_ulong size)
+{
+    CPUTLBEntryFull full = {
+        .phys_addr = paddr,
+        .attrs = attrs,
+        .prot = prot,
+        .lg_page_size = ctz64(size)
+    };
+
+    assert(is_power_of_2(size));
+    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+}
+
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size)
--
2.34.1

We need to set this in TCGLabelQemuLdst, so plumb this
all the way through from tcg_out_op.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390x/tcg-target.c.inc | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
 }
 
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
-                                TCGReg data, TCGReg addr,
+                                TCGType type, TCGReg data, TCGReg addr,
                                 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
     label->oi = oi;
+    label->type = type;
     label->datalo_reg = data;
     label->addrlo_reg = addr;
     label->raddr = tcg_splitwx_to_rx(raddr);
@@ -XXX,XX +XXX,XX @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
 #endif /* CONFIG_SOFTMMU */
 
 static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
-                            MemOpIdx oi)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
 
     tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
 
-    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
     TCGReg index_reg;
     tcg_target_long disp;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
 }
 
 static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
-                            MemOpIdx oi)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
 
     tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
 
-    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else
     TCGReg index_reg;
     tcg_target_long disp;
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        /* ???  Technically we can use a non-extending instruction.  */
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I32);
+        break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
+        tcg_out_qemu_ld(s, args[0], args[1], args[2], TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
+        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I32);
+        break;
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2]);
+        tcg_out_qemu_st(s, args[0], args[1], args[2], TCG_TYPE_I64);
         break;
 
     case INDEX_op_ld16s_i64:
--
2.34.1
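A compact sketch of the wrapper pattern the tlb_set_page_full patch
above introduces, with invented names standing in for the QEMU types
(PageEntryFull for CPUTLBEntryFull, page_set_full for
tlb_set_page_full); it mirrors the new tlb_set_page_with_attrs body:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t hwaddr;
    typedef struct { bool byte_swap; } MemTxAttrs;

    typedef struct PageEntryFull {
        hwaddr phys_addr;
        MemTxAttrs attrs;
        uint8_t prot;
        uint8_t lg_page_size;
    } PageEntryFull;

    static void page_set_full(int mmu_idx, uint64_t vaddr,
                              PageEntryFull *full)
    {
        /* Stand-in for tlb_set_page_full(); body omitted. */
        (void)mmu_idx; (void)vaddr; (void)full;
    }

    /* The legacy multi-argument entry point survives as a thin wrapper,
     * so the struct can grow new fields without touching every caller. */
    static void page_set_with_attrs(uint64_t vaddr, hwaddr paddr,
                                    MemTxAttrs attrs, int prot,
                                    int mmu_idx, uint64_t size)
    {
        PageEntryFull full = {
            .phys_addr = paddr,
            .attrs = attrs,
            .prot = prot,
            .lg_page_size = (uint8_t)__builtin_ctzll(size),
        };

        assert(size && (size & (size - 1)) == 0);  /* power of two */
        page_set_full(mmu_idx, vaddr, &full);
    }

Storing log2(size) in a uint8_t is what lets the struct describe any
power-of-two page size in a single byte.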
When PAGE_WRITE_INV is set when calling tlb_set_page,
we immediately set TLB_INVALID_MASK in order to force
tlb_fill to be called on the next lookup.  Here in
probe_access_internal, we have just called tlb_fill
and eliminated true misses, thus the lookup must be valid.

This allows us to remove a warning comment from s390x.
There doesn't seem to be a reason to change the code though.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c            | 10 +++++++++-
 target/s390x/tcg/mem_helper.c |  4 ----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     }
     tlb_addr = tlb_read_ofs(entry, elt_ofs);
 
+    flags = TLB_FLAGS_MASK;
     page_addr = addr & TARGET_PAGE_MASK;
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
 
             /* TLB resize via tlb_fill may have moved the entry.  */
             entry = tlb_entry(env, mmu_idx, addr);
+
+            /*
+             * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
+             * to force the next access through tlb_fill.  We've just
+             * called tlb_fill, so we know that this entry *is* valid.
+             */
+            flags &= ~TLB_INVALID_MASK;
         }
         tlb_addr = tlb_read_ofs(entry, elt_ofs);
     }
-    flags = tlb_addr & TLB_FLAGS_MASK;
+    flags &= tlb_addr;
 
     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM. */
     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/mem_helper.c
+++ b/target/s390x/tcg/mem_helper.c
@@ -XXX,XX +XXX,XX @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
 #else
     int flags;
 
-    /*
-     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
-     * to detect if there was an exception during tlb_fill().
-     */
     env->tlb_fill_exc = 0;
     flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
                                ra);
--
2.34.1

Collect the 3 potential parts of the host address into a struct.
Reorg tcg_out_qemu_{ld,st}_direct to use it.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390x/tcg-target.c.inc | 109 ++++++++++++++++++++-----------------
 1 file changed, 60 insertions(+), 49 deletions(-)

diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
     tcg_out_call_int(s, dest);
 }
 
+typedef struct {
+    TCGReg base;
+    TCGReg index;
+    int disp;
+} HostAddress;
+
 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
-                                   TCGReg base, TCGReg index, int disp)
+                                   HostAddress h)
 {
     switch (opc & (MO_SSIZE | MO_BSWAP)) {
     case MO_UB:
-        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
+        tcg_out_insn(s, RXY, LLGC, data, h.base, h.index, h.disp);
         break;
     case MO_SB:
-        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
+        tcg_out_insn(s, RXY, LGB, data, h.base, h.index, h.disp);
         break;
 
     case MO_UW | MO_BSWAP:
         /* swapped unsigned halfword load with upper bits zeroed */
-        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
         tcg_out_ext16u(s, data, data);
         break;
     case MO_UW:
-        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LLGH, data, h.base, h.index, h.disp);
         break;
 
     case MO_SW | MO_BSWAP:
         /* swapped sign-extended halfword load */
-        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRVH, data, h.base, h.index, h.disp);
         tcg_out_ext16s(s, TCG_TYPE_REG, data, data);
         break;
     case MO_SW:
-        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
+        tcg_out_insn(s, RXY, LGH, data, h.base, h.index, h.disp);
         break;
 
     case MO_UL | MO_BSWAP:
         /* swapped unsigned int load with upper bits zeroed */
-        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
         tcg_out_ext32u(s, data, data);
         break;
     case MO_UL:
-        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
+        tcg_out_insn(s, RXY, LLGF, data, h.base, h.index, h.disp);
         break;
 
     case MO_SL | MO_BSWAP:
         /* swapped sign-extended int load */
-        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRV, data, h.base, h.index, h.disp);
         tcg_out_ext32s(s, data, data);
         break;
     case MO_SL:
-        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
+        tcg_out_insn(s, RXY, LGF, data, h.base, h.index, h.disp);
         break;
 
     case MO_UQ | MO_BSWAP:
-        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
+        tcg_out_insn(s, RXY, LRVG, data, h.base, h.index, h.disp);
         break;
     case MO_UQ:
-        tcg_out_insn(s, RXY, LG, data, base, index, disp);
+        tcg_out_insn(s, RXY, LG, data, h.base, h.index, h.disp);
         break;
 
     default:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
 }
 
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
-                                   TCGReg base, TCGReg index, int disp)
+                                   HostAddress h)
 {
     switch (opc & (MO_SIZE | MO_BSWAP)) {
     case MO_UB:
-        if (disp >= 0 && disp < 0x1000) {
-            tcg_out_insn(s, RX, STC, data, base, index, disp);
+        if (h.disp >= 0 && h.disp < 0x1000) {
+            tcg_out_insn(s, RX, STC, data, h.base, h.index, h.disp);
         } else {
-            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
+            tcg_out_insn(s, RXY, STCY, data, h.base, h.index, h.disp);
         }
         break;
 
     case MO_UW | MO_BSWAP:
-        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
+        tcg_out_insn(s, RXY, STRVH, data, h.base, h.index, h.disp);
         break;
     case MO_UW:
-        if (disp >= 0 && disp < 0x1000) {
-            tcg_out_insn(s, RX, STH, data, base, index, disp);
+        if (h.disp >= 0 && h.disp < 0x1000) {
+            tcg_out_insn(s, RX, STH, data, h.base, h.index, h.disp);
         } else {
-            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
+            tcg_out_insn(s, RXY, STHY, data, h.base, h.index, h.disp);
         }
         break;
 
     case MO_UL | MO_BSWAP:
-        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
+        tcg_out_insn(s, RXY, STRV, data, h.base, h.index, h.disp);
         break;
     case MO_UL:
-        if (disp >= 0 && disp < 0x1000) {
-            tcg_out_insn(s, RX, ST, data, base, index, disp);
+        if (h.disp >= 0 && h.disp < 0x1000) {
+            tcg_out_insn(s, RX, ST, data, h.base, h.index, h.disp);
         } else {
-            tcg_out_insn(s, RXY, STY, data, base, index, disp);
+            tcg_out_insn(s, RXY, STY, data, h.base, h.index, h.disp);
         }
         break;
 
     case MO_UQ | MO_BSWAP:
-        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
+        tcg_out_insn(s, RXY, STRVG, data, h.base, h.index, h.disp);
         break;
     case MO_UQ:
-        tcg_out_insn(s, RXY, STG, data, base, index, disp);
+        tcg_out_insn(s, RXY, STG, data, h.base, h.index, h.disp);
         break;
 
     default:
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     return tcg_out_fail_alignment(s, l);
 }
 
-static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
-                                  TCGReg *index_reg, tcg_target_long *disp)
+static HostAddress tcg_prepare_user_ldst(TCGContext *s, TCGReg addr_reg)
 {
+    TCGReg index;
+    int disp;
+
     if (TARGET_LONG_BITS == 32) {
-        tcg_out_ext32u(s, TCG_TMP0, *addr_reg);
-        *addr_reg = TCG_TMP0;
+        tcg_out_ext32u(s, TCG_TMP0, addr_reg);
+        addr_reg = TCG_TMP0;
     }
     if (guest_base < 0x80000) {
-        *index_reg = TCG_REG_NONE;
-        *disp = guest_base;
+        index = TCG_REG_NONE;
+        disp = guest_base;
     } else {
-        *index_reg = TCG_GUEST_BASE_REG;
-        *disp = 0;
+        index = TCG_GUEST_BASE_REG;
+        disp = 0;
     }
+    return (HostAddress){ .base = addr_reg, .index = index, .disp = disp };
 }
 #endif /* CONFIG_SOFTMMU */
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;
+
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
-    TCGReg base_reg;
 
-    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+    h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
+    h.index = TCG_REG_R2;
+    h.disp = 0;
 
     tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
     label_ptr = s->code_ptr;
     s->code_ptr += 1;
 
-    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+    tcg_out_qemu_ld_direct(s, opc, data_reg, h);
 
     add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else
-    TCGReg index_reg;
-    tcg_target_long disp;
     unsigned a_bits = get_alignment_bits(opc);
 
     if (a_bits) {
         tcg_out_test_alignment(s, true, addr_reg, a_bits);
     }
-    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
-    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+    h = tcg_prepare_user_ldst(s, addr_reg);
+    tcg_out_qemu_ld_direct(s, opc, data_reg, h);
 #endif
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType data_type)
 {
     MemOp opc = get_memop(oi);
+    HostAddress h;
+
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
-    TCGReg base_reg;
 
-    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+    h.base = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
+    h.index = TCG_REG_R2;
+    h.disp = 0;
 
     tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
     label_ptr = s->code_ptr;
     s->code_ptr += 1;
 
-    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
+    tcg_out_qemu_st_direct(s, opc, data_reg, h);
 
     add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else
-    TCGReg index_reg;
-    tcg_target_long disp;
     unsigned a_bits = get_alignment_bits(opc);
 
     if (a_bits) {
         tcg_out_test_alignment(s, false, addr_reg, a_bits);
     }
-    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
-    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
+    h = tcg_prepare_user_ldst(s, addr_reg);
+    tcg_out_qemu_st_direct(s, opc, data_reg, h);
 #endif
 }
 
--
2.34.1
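To make the HostAddress idea above concrete, here is an illustrative
standalone sketch, not the QEMU code itself, of what the three parts
describe. On s390x an RX/RXY-form access computes base + index + disp;
bundling those into one small struct lets the softmmu path (base from
the TLB read, index fixed, disp 0) and the user-only path (base, guest
base register, displacement) share one emitter signature:

    #include <stdint.h>

    typedef struct {
        int base;     /* register number; TCGReg in the real code */
        int index;    /* < 0 here stands in for TCG_REG_NONE */
        int disp;     /* signed 12- or 20-bit displacement */
    } HostAddress;

    /* The effective address such an instruction would compute. */
    static uint64_t effective_address(const uint64_t *regs, HostAddress h)
    {
        uint64_t ea = regs[h.base] + (int64_t)h.disp;
        if (h.index >= 0) {
            ea += regs[h.index];
        }
        return ea;
    }

Passing the struct by value is cheap (it fits in registers) and avoids
threading three separate parameters through every load/store helper.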
This field is only written, not read; remove it.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h | 1 -
 accel/tcg/cputlb.c    | 7 +++----
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint {
  * the memory regions get moved around by io_writex.
  */
 typedef struct SavedIOTLB {
-    hwaddr addr;
     MemoryRegionSection *section;
     hwaddr mr_offset;
 } SavedIOTLB;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
  * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
  * because of the side effect of io_writex changing memory layout.
  */
-static void save_iotlb_data(CPUState *cs, hwaddr addr,
-                            MemoryRegionSection *section, hwaddr mr_offset)
+static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
+                            hwaddr mr_offset)
 {
 #ifdef CONFIG_PLUGIN
     SavedIOTLB *saved = &cs->saved_iotlb;
-    saved->addr = addr;
     saved->section = section;
     saved->mr_offset = mr_offset;
 #endif
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
      * The memory_region_dispatch may trigger a flush/resize
      * so for plugins we save the iotlb_data just in case.
      */
-    save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
+    save_iotlb_data(cpu, section, mr_offset);
 
     if (!qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
--
2.34.1

In tcg_canonicalize_memop, we remove MO_SIGN from MO_32 operations
with TCG_TYPE_I32.  Thus this is never set.  We already have an
identical test just above which does not include is_64

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);
 
     /* We let the helper sign-extend SB and SW, but leave SL for here. */
-    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
+    if ((memop & MO_SSIZE) == MO_SL) {
         tcg_out_ext32s(s, data, TCG_REG_O0);
     } else {
         tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
--
2.34.1
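For readers unfamiliar with the MemOp test in the sparc64 change
above, a small self-contained illustration of the encoding it relies
on. The values below match QEMU's layout (two size bits plus a sign
bit), but this is a simplified stand-in, not the real header:

    #include <stdio.h>

    enum {
        MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
        MO_SIZE  = 3,
        MO_SIGN  = 4,
        MO_SL    = MO_SIGN | MO_32,
        MO_SSIZE = MO_SIZE | MO_SIGN,
    };

    int main(void)
    {
        int memop = MO_SL;
        /* The surviving test: a sign-extended 32-bit load. */
        printf("%d\n", (memop & MO_SSIZE) == MO_SL);
        return 0;
    }

Since the middle end strips MO_SIGN from 32-bit operations on 32-bit
results, (memop & MO_SSIZE) == MO_SL can only be true for a 64-bit
result, which is why the extra is_64 check was dead.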
From: Alex Bennée <alex.bennee@linaro.org>

This is a heavily used function so let's avoid the cost of
CPU_GET_CLASS. On the romulus-bmc run it has a modest effect:

Before: 36.812 s ±  0.506 s
After: 35.912 s ±  0.168 s

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220811151413.3350684-4-alex.bennee@linaro.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20220923084803.498337-4-clg@kaod.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 hw/core/cpu-sysemu.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@ hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
 
 int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     int ret = 0;
 
-    if (cc->sysemu_ops->asidx_from_attrs) {
-        ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
+    if (cpu->cc->sysemu_ops->asidx_from_attrs) {
+        ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
         assert(ret < cpu->num_ases && ret >= 0);
     }
     return ret;
--
2.34.1

We need to set this in TCGLabelQemuLdst, so plumb this
all the way through from tcg_out_op.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.c.inc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
 };
 
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
-                            MemOpIdx oi, bool is_64)
+                            MemOpIdx oi, TCGType data_type)
 {
     MemOp memop = get_memop(oi);
     tcg_insn_unit *label_ptr;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, a1, a2, false);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, true);
+        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
     case INDEX_op_qemu_st_i32:
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
--
2.34.1
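A rough sketch of the caching pattern behind the cpu->cc change
above: the class pointer is looked up once and stored on the CPU
state, so hot paths do a single pointer load instead of a dynamic
class lookup. Types and names here are simplified stand-ins for the
QEMU structures:

    #include <assert.h>
    #include <stddef.h>

    typedef struct CPUClass {
        int (*asidx_from_attrs)(void *cpu, int attrs);
    } CPUClass;

    typedef struct CPUState {
        CPUClass *cc;   /* cached once, e.g. at realize time */
    } CPUState;

    /* Hot path: one load of cpu->cc instead of a class lookup. */
    static int cpu_asidx(CPUState *cpu, int attrs)
    {
        int ret = 0;

        if (cpu->cc->asidx_from_attrs) {
            ret = cpu->cc->asidx_from_attrs(cpu, attrs);
            assert(ret >= 0);
        }
        return ret;
    }

The cached pointer is safe because a CPU's class cannot change after
the object is created.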
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c          | 13 +++++++++++++
 tcg/tcg-ldst.c.inc | 14 --------------
 2 files changed, 13 insertions(+), 14 deletions(-)

The availability of tb->pc will shortly be conditional.
Introduce accessor functions to minimize ifdefs.

Pass around a known pc to places like tcg_gen_code,
where the caller must already have the value.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h                    |  6 ++++
 include/exec/exec-all.h                 |  6 ++++
 include/tcg/tcg.h                       |  2 +-
 accel/tcg/cpu-exec.c                    | 46 ++++++++++++++-----------
 accel/tcg/translate-all.c               | 37 +++++++++++---------
 target/arm/cpu.c                        |  4 +--
 target/avr/cpu.c                        |  2 +-
 target/hexagon/cpu.c                    |  2 +-
 target/hppa/cpu.c                       |  4 +--
 target/i386/tcg/tcg-cpu.c               |  2 +-
 target/loongarch/cpu.c                  |  2 +-
 target/microblaze/cpu.c                 |  2 +-
 target/mips/tcg/exception.c             |  2 +-
 target/mips/tcg/sysemu/special_helper.c |  2 +-
 target/openrisc/cpu.c                   |  2 +-
 target/riscv/cpu.c                      |  4 +--
 target/rx/cpu.c                         |  2 +-
 target/sh4/cpu.c                        |  4 +--
 target/sparc/cpu.c                      |  2 +-
 target/tricore/cpu.c                    |  2 +-
 tcg/tcg.c                               |  8 ++---
 21 files changed, 82 insertions(+), 61 deletions(-)

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 void page_init(void);
 void tb_htable_init(void);
 
+/* Return the current PC from CPU, which may be cached in TB. */
+static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
+{
+    return tb_pc(tb);
+}
+
 #endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
     uintptr_t jmp_dest[2];
 };
 
+/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
+static inline target_ulong tb_pc(const TranslationBlock *tb)
+{
+    return tb->pc;
+}
+
 /* Hide the qatomic_read to make code a little easier on the eyes */
 static inline uint32_t tb_cflags(const TranslationBlock *tb)
 {
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void);
 void tcg_prologue_init(TCGContext *s);
 void tcg_func_start(TCGContext *s);
 
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);
 
 void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
 
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
     const TranslationBlock *tb = p;
     const struct tb_desc *desc = d;
 
-    if (tb->pc == desc->pc &&
+    if (tb_pc(tb) == desc->pc &&
         tb->page_addr[0] == desc->page_addr0 &&
         tb->cs_base == desc->cs_base &&
         tb->flags == desc->flags &&
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     return tb;
 }
 
-static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
-                                const TranslationBlock *tb)
+static void log_cpu_exec(target_ulong pc, CPUState *cpu,
+                         const TranslationBlock *tb)
 {
-    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
-        && qemu_log_in_addr_range(pc)) {
-
+    if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [" TARGET_FMT_lx
                       "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
         return tcg_code_gen_epilogue;
     }
 
-    log_cpu_exec(pc, cpu, tb);
+    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
+        log_cpu_exec(pc, cpu, tb);
+    }
 
     return tb->tc.ptr;
 }
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
 
-    log_cpu_exec(itb->pc, cpu, itb);
+    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
+        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
+    }
 
     qemu_thread_jit_execute();
     ret = tcg_qemu_tb_exec(env, tb_ptr);
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
          * of the start of the TB.
          */
         CPUClass *cc = CPU_GET_CLASS(cpu);
-        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
-                               "Stopped execution of TB chain before %p ["
-                               TARGET_FMT_lx "] %s\n",
-                               last_tb->tc.ptr, last_tb->pc,
-                               lookup_symbol(last_tb->pc));
+
         if (cc->tcg_ops->synchronize_from_tb) {
             cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
             assert(cc->set_pc);
-            cc->set_pc(cpu, last_tb->pc);
+            cc->set_pc(cpu, tb_pc(last_tb));
+        }
+        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+            target_ulong pc = log_pc(cpu, last_tb);
+            if (qemu_log_in_addr_range(pc)) {
+                qemu_log("Stopped execution of TB chain before %p ["
+                         TARGET_FMT_lx "] %s\n",
+                         last_tb->tc.ptr, pc, lookup_symbol(pc));
+            }
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
 
     qemu_spin_unlock(&tb_next->jmp_lock);
 
-    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
-                           "Linking TBs %p [" TARGET_FMT_lx
-                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
-                           tb->tc.ptr, tb->pc, n,
-                           tb_next->tc.ptr, tb_next->pc);
+    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
+                  tb->tc.ptr, n, tb_next->tc.ptr);
     return;
 
 out_unlock_next:
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 }
 
 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
+                                    target_ulong pc,
                                     TranslationBlock **last_tb, int *tb_exit)
 {
     int32_t insns_left;
 
-    trace_exec_tb(tb, tb->pc);
+    trace_exec_tb(tb, pc);
     tb = cpu_tb_exec(cpu, tb, tb_exit);
     if (*tb_exit != TB_EXIT_REQUESTED) {
         *last_tb = tb;
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
                 tb_add_jump(last_tb, tb_exit, tb);
             }
 
-            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
+            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
 
             /* Try to align the host and virtual clocks
                if the guest is in advance */
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
 
         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
             if (i == 0) {
-                prev = (j == 0 ? tb->pc : 0);
+                prev = (j == 0 ? tb_pc(tb) : 0);
             } else {
                 prev = tcg_ctx->gen_insn_data[i - 1][j];
             }
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                      uintptr_t searched_pc, bool reset_icount)
 {
-    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
+    target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
     CPUArchState *env = cpu->env_ptr;
     const uint8_t *p = tb->tc.ptr + tb->tc.size;
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
     const TranslationBlock *a = ap;
     const TranslationBlock *b = bp;
 
-    return a->pc == b->pc &&
+    return tb_pc(a) == tb_pc(b) &&
         a->cs_base == b->cs_base &&
         a->flags == b->flags &&
         (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
@@ -XXX,XX +XXX,XX @@ static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
     TranslationBlock *tb = p;
     target_ulong addr = *(target_ulong *)userp;
 
-    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
+    if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) ||
+          addr >= tb_pc(tb) + tb->size)) {
         printf("ERROR invalidate: address=" TARGET_FMT_lx
-               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
+               " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void do_tb_page_check(void *p, uint32_t hash, void *userp)
     TranslationBlock *tb = p;
     int flags1, flags2;
 
-    flags1 = page_get_flags(tb->pc);
-    flags2 = page_get_flags(tb->pc + tb->size - 1);
+    flags1 = page_get_flags(tb_pc(tb));
+    flags2 = page_get_flags(tb_pc(tb) + tb->size - 1);
     if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
         printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
-               (long)tb->pc, tb->size, flags1, flags2);
+               (long)tb_pc(tb), tb->size, flags1, flags2);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
 
     /* remove the TB from the hash list */
     phys_pc = tb->page_addr[0];
-    h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
+    h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
                      tb->trace_vcpu_dstate);
     if (!qht_remove(&tb_ctx.htable, tb, h)) {
         return;
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
     }
 
     /* add in the hash table */
-    h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
+    h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
                      tb->trace_vcpu_dstate);
     qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
 
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_ctx->cpu = NULL;
     max_insns = tb->icount;
 
-    trace_translate_block(tb, tb->pc, tb->tc.ptr);
+    trace_translate_block(tb, pc, tb->tc.ptr);
 
     /* generate machine code */
     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     ti = profile_getclock();
 #endif
 
-    gen_code_size = tcg_gen_code(tcg_ctx, tb);
+    gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
     if (unlikely(gen_code_size < 0)) {
 error_return:
         switch (gen_code_size) {
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
 #ifdef DEBUG_DISAS
     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
-        qemu_log_in_addr_range(tb->pc)) {
+        qemu_log_in_addr_range(pc)) {
         FILE *logfile = qemu_log_trylock();
         if (logfile) {
             int code_size, data_size;
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
      */
     cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
 
-    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
-                           "cpu_io_recompile: rewound execution of TB to "
-                           TARGET_FMT_lx "\n", tb->pc);
+    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+        target_ulong pc = log_pc(cpu, tb);
+        if (qemu_log_in_addr_range(pc)) {
+            qemu_log("cpu_io_recompile: rewound execution of TB to "
+                     TARGET_FMT_lx "\n", pc);
+        }
+    }
 
     cpu_loop_exit_noexc(cpu);
 }
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_synchronize_from_tb(CPUState *cs,
      * never possible for an AArch64 TB to chain to an AArch32 TB.
      */
     if (is_a64(env)) {
-        env->pc = tb->pc;
+        env->pc = tb_pc(tb);
     } else {
-        env->regs[15] = tb->pc;
+        env->regs[15] = tb_pc(tb);
     }
 }
 #endif /* CONFIG_TCG */
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_synchronize_from_tb(CPUState *cs,
     AVRCPU *cpu = AVR_CPU(cs);
     CPUAVRState *env = &cpu->env;
 
-    env->pc_w = tb->pc / 2; /* internally PC points to words */
+    env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */
 }
 
 static void avr_cpu_reset(DeviceState *ds)
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
 {
     HexagonCPU *cpu = HEXAGON_CPU(cs);
     CPUHexagonState *env = &cpu->env;
-    env->gpr[HEX_REG_PC] = tb->pc;
+    env->gpr[HEX_REG_PC] = tb_pc(tb);
 }
 
 static bool hexagon_cpu_has_work(CPUState *cs)
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
     HPPACPU *cpu = HPPA_CPU(cs);
 
 #ifdef CONFIG_USER_ONLY
-    cpu->env.iaoq_f = tb->pc;
+    cpu->env.iaoq_f = tb_pc(tb);
     cpu->env.iaoq_b = tb->cs_base;
 #else
     /* Recover the IAOQ values from the GVA + PRIV. */
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
     int32_t diff = cs_base;
 
     cpu->env.iasq_f = iasq_f;
-    cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
+    cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv;
     if (diff) {
         cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
     }
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
 {
     X86CPU *cpu = X86_CPU(cs);
 
-    cpu->env.eip = tb->pc - tb->cs_base;
+    cpu->env.eip = tb_pc(tb) - tb->cs_base;
 }
 
 #ifndef CONFIG_USER_ONLY
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
     LoongArchCPU *cpu = LOONGARCH_CPU(cs);
     CPULoongArchState *env = &cpu->env;
 
-    env->pc = tb->pc;
+    env->pc = tb_pc(tb);
 }
 #endif /* CONFIG_TCG */
 
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_synchronize_from_tb(CPUState *cs,
 {
     MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
 
-    cpu->env.pc = tb->pc;
+    cpu->env.pc = tb_pc(tb);
     cpu->env.iflags = tb->flags & IFLAGS_TB_MASK;
 }
 
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/exception.c
+++ b/target/mips/tcg/exception.c
@@ -XXX,XX +XXX,XX @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
     MIPSCPU *cpu = MIPS_CPU(cs);
     CPUMIPSState *env = &cpu->env;
 
-    env->active_tc.PC = tb->pc;
+    env->active_tc.PC = tb_pc(tb);
     env->hflags &= ~MIPS_HFLAG_BMASK;
     env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
 }
diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/sysemu/special_helper.c
+++ b/target/mips/tcg/sysemu/special_helper.c
@@ -XXX,XX +XXX,XX @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
     CPUMIPSState *env = &cpu->env;
 
     if ((env->hflags & MIPS_HFLAG_BMASK) != 0
-        && env->active_tc.PC != tb->pc) {
+        && env->active_tc.PC != tb_pc(tb)) {
         env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
         env->hflags &= ~MIPS_HFLAG_BMASK;
         return true;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
448
{
449
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
450
451
- cpu->env.pc = tb->pc;
452
+ cpu->env.pc = tb_pc(tb);
453
}
454
455
456
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
457
index XXXXXXX..XXXXXXX 100644
458
--- a/target/riscv/cpu.c
459
+++ b/target/riscv/cpu.c
460
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs,
461
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
462
463
if (xl == MXL_RV32) {
464
- env->pc = (int32_t)tb->pc;
465
+ env->pc = (int32_t)tb_pc(tb);
466
} else {
467
- env->pc = tb->pc;
468
+ env->pc = tb_pc(tb);
469
}
470
}
471
472
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
473
index XXXXXXX..XXXXXXX 100644
474
--- a/target/rx/cpu.c
475
+++ b/target/rx/cpu.c
476
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_synchronize_from_tb(CPUState *cs,
477
{
478
RXCPU *cpu = RX_CPU(cs);
479
480
- cpu->env.pc = tb->pc;
481
+ cpu->env.pc = tb_pc(tb);
482
}
483
484
static bool rx_cpu_has_work(CPUState *cs)
485
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
486
index XXXXXXX..XXXXXXX 100644
487
--- a/target/sh4/cpu.c
488
+++ b/target/sh4/cpu.c
489
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
490
{
491
SuperHCPU *cpu = SUPERH_CPU(cs);
492
493
- cpu->env.pc = tb->pc;
494
+ cpu->env.pc = tb_pc(tb);
495
cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
496
}
497
498
@@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
499
CPUSH4State *env = &cpu->env;
500
501
if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
502
- && env->pc != tb->pc) {
503
+ && env->pc != tb_pc(tb)) {
504
env->pc -= 2;
505
env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
506
return true;
507
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
508
index XXXXXXX..XXXXXXX 100644
509
--- a/target/sparc/cpu.c
510
+++ b/target/sparc/cpu.c
511
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs,
512
{
513
SPARCCPU *cpu = SPARC_CPU(cs);
514
515
- cpu->env.pc = tb->pc;
516
+ cpu->env.pc = tb_pc(tb);
517
cpu->env.npc = tb->cs_base;
518
}
519
520
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
521
index XXXXXXX..XXXXXXX 100644
522
--- a/target/tricore/cpu.c
523
+++ b/target/tricore/cpu.c
524
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs,
525
TriCoreCPU *cpu = TRICORE_CPU(cs);
526
CPUTriCoreState *env = &cpu->env;
527
528
- env->PC = tb->pc;
529
+ env->PC = tb_pc(tb);
530
}
531
532
static void tricore_cpu_reset(DeviceState *dev)
533
diff --git a/tcg/tcg.c b/tcg/tcg.c
8
diff --git a/tcg/tcg.c b/tcg/tcg.c
534
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
535
--- a/tcg/tcg.c
10
--- a/tcg/tcg.c
536
+++ b/tcg/tcg.c
11
+++ b/tcg/tcg.c
537
@@ -XXX,XX +XXX,XX @@ int64_t tcg_cpu_exec_time(void)
12
@@ -XXX,XX +XXX,XX @@ typedef struct QEMU_PACKED {
538
#endif
13
DebugFrameFDEHeader fde;
539
14
} DebugFrameHeader;
540
15
541
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
16
+typedef struct TCGLabelQemuLdst {
542
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
17
+ bool is_ld; /* qemu_ld: true, qemu_st: false */
543
{
18
+ MemOpIdx oi;
544
#ifdef CONFIG_PROFILER
19
+ TCGType type; /* result type of a load */
545
TCGProfile *prof = &s->prof;
20
+ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
546
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
21
+ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
547
22
+ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
548
#ifdef DEBUG_DISAS
23
+ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
549
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
24
+ const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
550
- && qemu_log_in_addr_range(tb->pc))) {
25
+ tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
551
+ && qemu_log_in_addr_range(pc_start))) {
26
+ QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
552
FILE *logfile = qemu_log_trylock();
27
+} TCGLabelQemuLdst;
553
if (logfile) {
28
+
554
fprintf(logfile, "OP:\n");
29
static void tcg_register_jit_int(const void *buf, size_t size,
555
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
30
const void *debug_frame,
556
if (s->nb_indirects > 0) {
31
size_t debug_frame_size)
557
#ifdef DEBUG_DISAS
32
diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
558
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
33
index XXXXXXX..XXXXXXX 100644
559
- && qemu_log_in_addr_range(tb->pc))) {
34
--- a/tcg/tcg-ldst.c.inc
560
+ && qemu_log_in_addr_range(pc_start))) {
35
+++ b/tcg/tcg-ldst.c.inc
561
FILE *logfile = qemu_log_trylock();
36
@@ -XXX,XX +XXX,XX @@
562
if (logfile) {
37
* THE SOFTWARE.
563
fprintf(logfile, "OP before indirect lowering:\n");
38
*/
564
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
39
565
40
-typedef struct TCGLabelQemuLdst {
566
#ifdef DEBUG_DISAS
41
- bool is_ld; /* qemu_ld: true, qemu_st: false */
567
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
42
- MemOpIdx oi;
568
- && qemu_log_in_addr_range(tb->pc))) {
43
- TCGType type; /* result type of a load */
569
+ && qemu_log_in_addr_range(pc_start))) {
44
- TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
570
FILE *logfile = qemu_log_trylock();
45
- TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
571
if (logfile) {
46
- TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
572
fprintf(logfile, "OP after optimization and liveness analysis:\n");
47
- TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
48
- const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
49
- tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
50
- QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
51
-} TCGLabelQemuLdst;
52
-
53
-
54
/*
55
* Generate TB finalization at the end of block
56
*/
573
--
57
--
574
2.34.1
58
2.34.1
575
59
576
60
An inline function is safer than a macro, and REG_P
was rather too generic.
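
[Illustration, not part of the patch: a minimal standalone sketch of the macro-vs-inline point. The names below (slot_in_reg, call_iarg_regs) are hypothetical; the real function is arg_slot_reg_p in tcg/tcg.c.]

#include <stdbool.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical host argument-register list; it may be empty on some hosts. */
static const int call_iarg_regs[4] = { 10, 11, 12, 13 };

static inline bool slot_in_reg(unsigned arg_slot)
{
    /*
     * If the array were empty, writing "arg_slot < ARRAY_SIZE(...)" in
     * a macro is "unsigned < 0", which is always false and trips
     * -Werror; routing the size through a local variable avoids the
     * warning, and the inline function type-checks its argument in a
     * way the old REG_P macro could not.
     */
    unsigned nreg = ARRAY_SIZE(call_iarg_regs);
    return arg_slot < nreg;
}

int main(void)
{
    return slot_in_reg(2) && !slot_in_reg(7) ? 0 : 1;
}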

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-internal.h | 4 ----
tcg/tcg.c | 16 +++++++++++++---
2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@ typedef struct TCGCallArgumentLoc {
unsigned tmp_subindex : 2;
} TCGCallArgumentLoc;

-/* Avoid "unsigned < 0 is always false" Werror, when iarg_regs is empty. */
-#define REG_P(L) \
- ((int)(L)->arg_slot < (int)ARRAY_SIZE(tcg_target_call_iarg_regs))
-
typedef struct TCGHelperInfo {
void *func;
const char *name;
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void init_ffi_layouts(void)
}
#endif /* CONFIG_TCG_INTERPRETER */

+static inline bool arg_slot_reg_p(unsigned arg_slot)
+{
+ /*
+ * Split the sizeof away from the comparison to avoid Werror from
+ * "unsigned < 0 is always false", when iarg_regs is empty.
+ */
+ unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
+ return arg_slot < nreg;
+}
+
typedef struct TCGCumulativeArgs {
int arg_idx; /* tcg_gen_callN args[] */
int info_in_idx; /* TCGHelperInfo in[] */
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
case TCG_CALL_ARG_NORMAL:
case TCG_CALL_ARG_EXTEND_U:
case TCG_CALL_ARG_EXTEND_S:
- if (REG_P(loc)) {
+ if (arg_slot_reg_p(loc->arg_slot)) {
*la_temp_pref(ts) = 0;
break;
}
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
case TCG_CALL_ARG_NORMAL:
case TCG_CALL_ARG_EXTEND_U:
case TCG_CALL_ARG_EXTEND_S:
- if (REG_P(loc)) {
+ if (arg_slot_reg_p(loc->arg_slot)) {
tcg_regset_set_reg(*la_temp_pref(ts),
tcg_target_call_iarg_regs[loc->arg_slot]);
}
@@ -XXX,XX +XXX,XX @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
TCGTemp *ts, TCGRegSet *allocated_regs)
{
- if (REG_P(l)) {
+ if (arg_slot_reg_p(l->arg_slot)) {
TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
load_arg_reg(s, reg, ts, *allocated_regs);
tcg_regset_set_reg(*allocated_regs, reg);
--
2.34.1

From: Alex Bennée <alex.bennee@linaro.org>

The class cast checkers are quite expensive and always on (unlike the
dynamic case, whose checks are gated by CONFIG_QOM_CAST_DEBUG). To
avoid the overhead of repeatedly checking something which should never
change, we cache the CPUClass reference for use in the hot code paths.
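
[Illustration, not from the patch: the caching pattern in miniature, with hypothetical types standing in for the QOM machinery.]

#include <assert.h>
#include <stdio.h>

typedef struct ObjClass { const char *name; } ObjClass;
typedef struct Obj {
    ObjClass *klass;
    ObjClass *cc;     /* cached at realize time, like CPUState::cc */
} Obj;

/* Stands in for CPU_GET_CLASS(): imagine the assert being expensive. */
static ObjClass *obj_get_class_checked(Obj *o)
{
    assert(o && o->klass);    /* the always-on cast check */
    return o->klass;
}

static void obj_realize(Obj *o)
{
    o->cc = obj_get_class_checked(o);   /* pay the check exactly once */
}

static const char *hot_path(Obj *o)
{
    return o->cc->name;                 /* no checked cast on the hot path */
}

int main(void)
{
    ObjClass k = { "cpu" };
    Obj o = { &k, NULL };
    obj_realize(&o);
    printf("%s\n", hot_path(&o));
    return 0;
}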

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220811151413.3350684-3-alex.bennee@linaro.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20220923084803.498337-3-clg@kaod.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 9 +++++++++
cpu.c | 9 ++++-----
2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
*/
#define CPU(obj) ((CPUState *)(obj))

+/*
+ * The class checkers bring in CPU_GET_CLASS() which is potentially
+ * expensive given the eventual call to
+ * object_class_dynamic_cast_assert(). Because of this the CPUState
+ * has a cached value for the class in cs->cc which is set up in
+ * cpu_exec_realizefn() for use in hot code paths.
+ */
typedef struct CPUClass CPUClass;
DECLARE_CLASS_CHECKERS(CPUClass, CPU,
TYPE_CPU)
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
struct CPUState {
/*< private >*/
DeviceState parent_obj;
+ /* cache to avoid expensive CPU_GET_CLASS */
+ CPUClass *cc;
/*< public >*/

int nr_cores;
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_cpu_common = {

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
-#ifndef CONFIG_USER_ONLY
- CPUClass *cc = CPU_GET_CLASS(cpu);
-#endif
+ /* cache the cpu class for the hotpath */
+ cpu->cc = CPU_GET_CLASS(cpu);

cpu_list_add(cpu);
if (!accel_cpu_realizefn(cpu, errp)) {
@@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
- if (cc->sysemu_ops->legacy_vmsd != NULL) {
- vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu);
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
}
#endif /* CONFIG_USER_ONLY */
}
--
2.34.1
Unify all computation of argument stack offset in one function.
This requires that we adjust ref_slot to be in the same units,
by adding max_reg_slots during init_call_layout.
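
[Worked example, not from the patch: how the unified slot-to-offset mapping behaves, with made-up constants (NREG, CALL_STACK_OFFSET) rather than any real host ABI. With 6 register slots and 8-byte words, argument slot 6 is the first stack slot at offset 16, and slot 8 lands at 16 + 2*8 = 32.]

#include <assert.h>
#include <stdio.h>

enum { NREG = 6, CALL_STACK_OFFSET = 16, WORDSIZE = 8, MAX_STK = 16 };

static inline int arg_slot_stk_ofs_demo(unsigned arg_slot)
{
    /* A register slot wraps to a huge unsigned value here... */
    unsigned stk_slot = arg_slot - NREG;
    /* ...so this single assert also rejects register slots, the same
     * trick the real arg_slot_stk_ofs() uses with tcg_debug_assert. */
    assert(stk_slot < MAX_STK);
    return CALL_STACK_OFFSET + stk_slot * WORDSIZE;
}

int main(void)
{
    /* Slot 6 is the first stack slot, slot 8 the third. */
    printf("%d %d\n", arg_slot_stk_ofs_demo(6), arg_slot_stk_ofs_demo(8));
    return 0; /* prints "16 32" */
}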

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static inline bool arg_slot_reg_p(unsigned arg_slot)
return arg_slot < nreg;
}

+static inline int arg_slot_stk_ofs(unsigned arg_slot)
+{
+ unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
+ unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
+
+ tcg_debug_assert(stk_slot < max);
+ return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
+}
+
typedef struct TCGCumulativeArgs {
int arg_idx; /* tcg_gen_callN args[] */
int info_in_idx; /* TCGHelperInfo in[] */
@@ -XXX,XX +XXX,XX @@ static void init_call_layout(TCGHelperInfo *info)
}
}
assert(ref_base + cum.ref_slot <= max_stk_slots);
+ ref_base += max_reg_slots;

if (ref_base != 0) {
for (int i = cum.info_in_idx - 1; i >= 0; --i) {
@@ -XXX,XX +XXX,XX @@ static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
}
}

-static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
+static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
TCGRegSet allocated_regs)
{
/*
@@ -XXX,XX +XXX,XX @@ static void load_arg_stk(TCGContext *s, int stk_slot, TCGTemp *ts,
*/
temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET +
- stk_slot * sizeof(tcg_target_long));
+ arg_slot_stk_ofs(arg_slot));
}

static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
@@ -XXX,XX +XXX,XX @@ static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
load_arg_reg(s, reg, ts, *allocated_regs);
tcg_regset_set_reg(*allocated_regs, reg);
} else {
- load_arg_stk(s, l->arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs),
- ts, *allocated_regs);
+ load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
}
}

-static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
+static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
intptr_t ref_off, TCGRegSet *allocated_regs)
{
TCGReg reg;
- int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);

- if (stk_slot < 0) {
+ if (arg_slot_reg_p(arg_slot)) {
reg = tcg_target_call_iarg_regs[arg_slot];
tcg_reg_free(s, reg, *allocated_regs);
tcg_out_addi_ptr(s, reg, ref_base, ref_off);
@@ -XXX,XX +XXX,XX @@ static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
*allocated_regs, 0, false);
tcg_out_addi_ptr(s, reg, ref_base, ref_off);
tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET
- + stk_slot * sizeof(tcg_target_long));
+ arg_slot_stk_ofs(arg_slot));
}
}

@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
case TCG_CALL_ARG_BY_REF:
load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
- TCG_TARGET_CALL_STACK_OFFSET
- + loc->ref_slot * sizeof(tcg_target_long),
+ arg_slot_stk_ofs(loc->ref_slot),
&allocated_regs);
break;
case TCG_CALL_ARG_BY_REF_N:
--
2.34.1

Populate this new method for all targets. Always match
the result that would be given by cpu_get_tb_cpu_state,
as we will want these values to correspond in the logs.
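
[Illustration, not from the patch: the shape of the new hook on a toy CPU class. The types here (ToyCPU, ToyCPUClass) are hypothetical; in QEMU the callback is CPUClass::get_pc, and each target below supplies its own implementation.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;

typedef struct ToyCPU ToyCPU;
typedef struct ToyCPUClass {
    void  (*set_pc)(ToyCPU *cpu, vaddr value);
    vaddr (*get_pc)(ToyCPU *cpu);
} ToyCPUClass;

struct ToyCPU {
    const ToyCPUClass *cc;
    vaddr pc;
};

static void toy_set_pc(ToyCPU *cpu, vaddr value) { cpu->pc = value; }
static vaddr toy_get_pc(ToyCPU *cpu) { return cpu->pc; }

static const ToyCPUClass toy_class = { toy_set_pc, toy_get_pc };

/* Generic code can now log the PC without knowing the target layout. */
static void log_cpu(ToyCPU *cpu)
{
    printf("pc=0x%" PRIx64 "\n", (uint64_t)cpu->cc->get_pc(cpu));
}

int main(void)
{
    ToyCPU cpu = { &toy_class, 0 };
    cpu.cc->set_pc(&cpu, 0x80000000);
    log_cpu(&cpu);
    return 0;
}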

Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (target/sparc)
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
Cc: Eduardo Habkost <eduardo@habkost.net> (supporter:Machine core)
Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com> (supporter:Machine core)
Cc: "Philippe Mathieu-Daudé" <f4bug@amsat.org> (reviewer:Machine core)
Cc: Yanan Wang <wangyanan55@huawei.com> (reviewer:Machine core)
Cc: Michael Rolnik <mrolnik@gmail.com> (maintainer:AVR TCG CPUs)
Cc: "Edgar E. Iglesias" <edgar.iglesias@gmail.com> (maintainer:CRIS TCG CPUs)
Cc: Taylor Simpson <tsimpson@quicinc.com> (supporter:Hexagon TCG CPUs)
Cc: Song Gao <gaosong@loongson.cn> (maintainer:LoongArch TCG CPUs)
Cc: Xiaojuan Yang <yangxiaojuan@loongson.cn> (maintainer:LoongArch TCG CPUs)
Cc: Laurent Vivier <laurent@vivier.eu> (maintainer:M68K TCG CPUs)
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com> (reviewer:MIPS TCG CPUs)
Cc: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> (reviewer:MIPS TCG CPUs)
Cc: Chris Wulff <crwulff@gmail.com> (maintainer:NiosII TCG CPUs)
Cc: Marek Vasut <marex@denx.de> (maintainer:NiosII TCG CPUs)
Cc: Stafford Horne <shorne@gmail.com> (odd fixer:OpenRISC TCG CPUs)
Cc: Yoshinori Sato <ysato@users.sourceforge.jp> (reviewer:RENESAS RX CPUs)
Cc: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (maintainer:SPARC TCG CPUs)
Cc: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> (maintainer:TriCore TCG CPUs)
Cc: Max Filippov <jcmvbkbc@gmail.com> (maintainer:Xtensa TCG CPUs)
Cc: qemu-arm@nongnu.org (open list:ARM TCG CPUs)
Cc: qemu-ppc@nongnu.org (open list:PowerPC TCG CPUs)
Cc: qemu-riscv@nongnu.org (open list:RISC-V TCG CPUs)
Cc: qemu-s390x@nongnu.org (open list:S390 TCG CPUs)
---
include/hw/core/cpu.h | 3 +++
target/alpha/cpu.c | 9 +++++++++
target/arm/cpu.c | 13 +++++++++++++
target/avr/cpu.c | 8 ++++++++
target/cris/cpu.c | 8 ++++++++
target/hexagon/cpu.c | 8 ++++++++
target/hppa/cpu.c | 8 ++++++++
target/i386/cpu.c | 9 +++++++++
target/loongarch/cpu.c | 9 +++++++++
target/m68k/cpu.c | 8 ++++++++
target/microblaze/cpu.c | 8 ++++++++
target/mips/cpu.c | 8 ++++++++
target/nios2/cpu.c | 9 +++++++++
target/openrisc/cpu.c | 8 ++++++++
target/ppc/cpu_init.c | 8 ++++++++
target/riscv/cpu.c | 13 +++++++++++++
target/rx/cpu.c | 8 ++++++++
target/s390x/cpu.c | 8 ++++++++
target/sh4/cpu.c | 8 ++++++++
target/sparc/cpu.c | 8 ++++++++
target/tricore/cpu.c | 9 +++++++++
target/xtensa/cpu.c | 8 ++++++++
22 files changed, 186 insertions(+)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
* If the target behaviour here is anything other than "set
* the PC register to the value passed in" then the target must
* also implement the synchronize_from_tb hook.
+ * @get_pc: Callback for getting the Program Counter register.
+ * As above, with the semantics of the target architecture.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
* @gdb_adjust_breakpoint: Callback for adjusting the address of a
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
void (*dump_state)(CPUState *cpu, FILE *, int flags);
int64_t (*get_arch_id)(CPUState *cpu);
void (*set_pc)(CPUState *cpu, vaddr value);
+ vaddr (*get_pc)(CPUState *cpu);
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}

+static vaddr alpha_cpu_get_pc(CPUState *cs)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+
+ return cpu->env.pc;
+}
+
+
static bool alpha_cpu_has_work(CPUState *cs)
{
/* Here we are checking to see if the CPU should wake up from HALT.
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = alpha_cpu_has_work;
cc->dump_state = alpha_cpu_dump_state;
cc->set_pc = alpha_cpu_set_pc;
+ cc->get_pc = alpha_cpu_get_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
}
}

+static vaddr arm_cpu_get_pc(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (is_a64(env)) {
+ return env->pc;
+ } else {
+ return env->regs[15];
+ }
+}
+
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = arm_cpu_has_work;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
+ cc->get_pc = arm_cpu_get_pc;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc_w = value / 2; /* internally PC points to words */
}

+static vaddr avr_cpu_get_pc(CPUState *cs)
+{
+ AVRCPU *cpu = AVR_CPU(cs);
+
+ return cpu->env.pc_w * 2;
+}
+
static bool avr_cpu_has_work(CPUState *cs)
{
AVRCPU *cpu = AVR_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = avr_cpu_has_work;
cc->dump_state = avr_cpu_dump_state;
cc->set_pc = avr_cpu_set_pc;
+ cc->get_pc = avr_cpu_get_pc;
dc->vmsd = &vms_avr_cpu;
cc->sysemu_ops = &avr_sysemu_ops;
cc->disas_set_info = avr_cpu_disas_set_info;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}

+static vaddr cris_cpu_get_pc(CPUState *cs)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static bool cris_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = cris_cpu_has_work;
cc->dump_state = cris_cpu_dump_state;
cc->set_pc = cris_cpu_set_pc;
+ cc->get_pc = cris_cpu_get_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_set_pc(CPUState *cs, vaddr value)
env->gpr[HEX_REG_PC] = value;
}

+static vaddr hexagon_cpu_get_pc(CPUState *cs)
+{
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
+ CPUHexagonState *env = &cpu->env;
+ return env->gpr[HEX_REG_PC];
+}
+
static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data)
cc->has_work = hexagon_cpu_has_work;
cc->dump_state = hexagon_dump_state;
cc->set_pc = hexagon_cpu_set_pc;
+ cc->get_pc = hexagon_cpu_get_pc;
cc->gdb_read_register = hexagon_gdb_read_register;
cc->gdb_write_register = hexagon_gdb_write_register;
cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS;
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.iaoq_b = value + 4;
}

+static vaddr hppa_cpu_get_pc(CPUState *cs)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ return cpu->env.iaoq_f;
+}
+
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = hppa_cpu_has_work;
cc->dump_state = hppa_cpu_dump_state;
cc->set_pc = hppa_cpu_set_pc;
+ cc->get_pc = hppa_cpu_get_pc;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.eip = value;
}

+static vaddr x86_cpu_get_pc(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ /* Match cpu_get_tb_cpu_state. */
+ return cpu->env.eip + cpu->env.segs[R_CS].base;
+}
+
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
X86CPU *cpu = X86_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->has_work = x86_cpu_has_work;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
+ cc->get_pc = x86_cpu_get_pc;
cc->gdb_read_register = x86_cpu_gdb_read_register;
cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
env->pc = value;
}

+static vaddr loongarch_cpu_get_pc(CPUState *cs)
+{
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+ CPULoongArchState *env = &cpu->env;
+
+ return env->pc;
+}
+
#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data)
cc->has_work = loongarch_cpu_has_work;
cc->dump_state = loongarch_cpu_dump_state;
cc->set_pc = loongarch_cpu_set_pc;
+ cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
dc->vmsd = &vmstate_loongarch_cpu;
cc->sysemu_ops = &loongarch_sysemu_ops;
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}

+static vaddr m68k_cpu_get_pc(CPUState *cs)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static bool m68k_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & CPU_INTERRUPT_HARD;
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->has_work = m68k_cpu_has_work;
cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc;
+ cc->get_pc = m68k_cpu_get_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
#if defined(CONFIG_SOFTMMU)
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.iflags = 0;
}

+static vaddr mb_cpu_get_pc(CPUState *cs)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static void mb_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)

cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
+ cc->get_pc = mb_cpu_get_pc;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;

diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value)
mips_env_set_pc(&cpu->env, value);
}

+static vaddr mips_cpu_get_pc(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+
+ return cpu->env.active_tc.PC;
+}
+
static bool mips_cpu_has_work(CPUState *cs)
{
MIPSCPU *cpu = MIPS_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->has_work = mips_cpu_has_work;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
+ cc->get_pc = mips_cpu_get_pc;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
env->pc = value;
}

+static vaddr nios2_cpu_get_pc(CPUState *cs)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ return env->pc;
+}
+
static bool nios2_cpu_has_work(CPUState *cs)
{
return cs->interrupt_request & CPU_INTERRUPT_HARD;
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = nios2_cpu_has_work;
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
+ cc->get_pc = nios2_cpu_get_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &nios2_sysemu_ops;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.dflag = 0;
}

+static vaddr openrisc_cpu_get_pc(CPUState *cs)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = openrisc_cpu_has_work;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
+ cc->get_pc = openrisc_cpu_get_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.nip = value;
}

+static vaddr ppc_cpu_get_pc(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ return cpu->env.nip;
+}
+
static bool ppc_cpu_has_work(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = ppc_cpu_has_work;
cc->dump_state = ppc_cpu_dump_state;
cc->set_pc = ppc_cpu_set_pc;
+ cc->get_pc = ppc_cpu_get_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
}
}

+static vaddr riscv_cpu_get_pc(CPUState *cs)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ /* Match cpu_get_tb_cpu_state. */
+ if (env->xl == MXL_RV32) {
+ return env->pc & UINT32_MAX;
+ }
+ return env->pc;
+}
+
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->has_work = riscv_cpu_has_work;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
+ cc->get_pc = riscv_cpu_get_pc;
cc->gdb_read_register = riscv_cpu_gdb_read_register;
cc->gdb_write_register = riscv_cpu_gdb_write_register;
cc->gdb_num_core_regs = 33;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}

+static vaddr rx_cpu_get_pc(CPUState *cs)
+{
+ RXCPU *cpu = RX_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static void rx_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
cc->has_work = rx_cpu_has_work;
cc->dump_state = rx_cpu_dump_state;
cc->set_pc = rx_cpu_set_pc;
+ cc->get_pc = rx_cpu_get_pc;

#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &rx_sysemu_ops;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.psw.addr = value;
}

+static vaddr s390_cpu_get_pc(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ return cpu->env.psw.addr;
+}
+
static bool s390_cpu_has_work(CPUState *cs)
{
S390CPU *cpu = S390_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = s390_cpu_has_work;
cc->dump_state = s390_cpu_dump_state;
cc->set_pc = s390_cpu_set_pc;
+ cc->get_pc = s390_cpu_get_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
cc->gdb_write_register = s390_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}

+static vaddr superh_cpu_get_pc(CPUState *cs)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static void superh_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = superh_cpu_has_work;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
+ cc->get_pc = superh_cpu_get_pc;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.npc = value + 4;
}

+static vaddr sparc_cpu_get_pc(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static void sparc_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
#endif
cc->set_pc = sparc_cpu_set_pc;
+ cc->get_pc = sparc_cpu_get_pc;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
env->PC = value & ~(target_ulong)1;
}

+static vaddr tricore_cpu_get_pc(CPUState *cs)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+
+ return env->PC;
+}
+
static void tricore_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)

cc->dump_state = tricore_cpu_dump_state;
cc->set_pc = tricore_cpu_set_pc;
+ cc->get_pc = tricore_cpu_get_pc;
cc->sysemu_ops = &tricore_sysemu_ops;
cc->tcg_ops = &tricore_tcg_ops;
}
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}

+static vaddr xtensa_cpu_get_pc(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ return cpu->env.pc;
+}
+
static bool xtensa_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = xtensa_cpu_has_work;
cc->dump_state = xtensa_cpu_dump_state;
cc->set_pc = xtensa_cpu_set_pc;
+ cc->get_pc = xtensa_cpu_get_pc;
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
cc->gdb_stop_before_watchpoint = true;
--
2.34.1

Add an interface to return the CPUTLBEntryFull struct
that goes with the lookup. The result is not intended
to be valid across multiple lookups, so the user must
use the results immediately.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 15 +++++++++++++
include/qemu/typedefs.h | 1 +
accel/tcg/cputlb.c | 47 +++++++++++++++++++++++++----------------
3 files changed, 45 insertions(+), 18 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);

+#ifndef CONFIG_USER_ONLY
+/**
+ * probe_access_full:
+ * Like probe_access_flags, except also return into @pfull.
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ */
+int probe_access_full(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost,
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
+#endif
+
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */

/* Estimated block size for TB allocation. */
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -XXX,XX +XXX,XX @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
typedef struct CPUAddressSpace CPUAddressSpace;
typedef struct CPUArchState CPUArchState;
typedef struct CPUState CPUState;
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
typedef struct DeviceListener DeviceListener;
typedef struct DeviceState DeviceState;
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
static int probe_access_internal(CPUArchState *env, target_ulong addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
- void **phost, uintptr_t retaddr)
+ void **phost, CPUTLBEntryFull **pfull,
+ uintptr_t retaddr)
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
+ *pfull = NULL;
return TLB_INVALID_MASK;
}

/* TLB resize via tlb_fill may have moved the entry. */
+ index = tlb_index(env, mmu_idx, addr);
entry = tlb_entry(env, mmu_idx, addr);

/*
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
}
flags &= tlb_addr;

+ *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
*phost = NULL;
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
return flags;
}

-int probe_access_flags(CPUArchState *env, target_ulong addr,
- MMUAccessType access_type, int mmu_idx,
- bool nonfault, void **phost, uintptr_t retaddr)
+int probe_access_full(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, CPUTLBEntryFull **pfull,
+ uintptr_t retaddr)
{
- int flags;
-
- flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
- nonfault, phost, retaddr);
+ int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+ nonfault, phost, pfull, retaddr);

/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}

return flags;
}

+int probe_access_flags(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr)
+{
+ CPUTLBEntryFull *full;
+
+ return probe_access_full(env, addr, access_type, mmu_idx,
+ nonfault, phost, &full, retaddr);
+}
+
void *probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
+ CPUTLBEntryFull *full;
void *host;
int flags;

g_assert(-(addr | TARGET_PAGE_MASK) >= size);

flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- false, &host, retaddr);
+ false, &host, &full, retaddr);

/* Per the interface, size == 0 merely faults the access. */
if (size == 0) {
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
}

if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
/* Handle watchpoints. */
if (flags & TLB_WATCHPOINT) {
int wp_access = (access_type == MMU_DATA_STORE
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
+ CPUTLBEntryFull *full;
void *host;
int flags;

flags = probe_access_internal(env, addr, 0, access_type,
- mmu_idx, true, &host, 0);
+ mmu_idx, true, &host, &full, 0);

/* No combination of flags are expected by the caller. */
return flags ? NULL : host;
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
void **hostp)
{
+ CPUTLBEntryFull *full;
void *p;

(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
- cpu_mmu_index(env, true), false, &p, 0);
+ cpu_mmu_index(env, true), false, &p, &full, 0);
if (p == NULL) {
return -1;
}
--
2.34.1

While the old type was correct in the ideal sense, some ABIs require
the argument to be zero-extended. Using uint32_t for all such values
is a decent compromise.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg-ldst.h | 10 +++++++---
accel/tcg/cputlb.c | 6 +++---
2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-ldst.h
+++ b/include/tcg/tcg-ldst.h
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr);

-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+/*
+ * Value extended to at least uint32_t, so that some ABIs do not require
+ * zero-extension from uint8_t or uint16_t.
+ */
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr);
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_UB);
}

-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
+void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_stb_mmu(env, addr, val, oi, retaddr);
@@ -XXX,XX +XXX,XX @@ static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_le_stw_mmu(env, addr, val, oi, retaddr);
@@ -XXX,XX +XXX,XX @@ static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
full_be_stw_mmu(env, addr, val, oi, retaddr);
--
2.34.1
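
[Illustration for the probe_access_full patch above, not code from the series: a sketch of the stated contract that the returned CPUTLBEntryFull pointer is transient. The wrapper name probe_and_copy_full is hypothetical, and this would only compile in the context of QEMU's cputlb internals.]

static inline bool probe_and_copy_full(CPUArchState *env, target_ulong addr,
                                       int mmu_idx, uintptr_t ra,
                                       CPUTLBEntryFull *out)
{
    CPUTLBEntryFull *full;
    void *host;
    int flags = probe_access_full(env, addr, MMU_DATA_LOAD, mmu_idx,
                                  true, &host, &full, ra);

    if (flags & TLB_INVALID_MASK) {
        return false;
    }
    *out = *full;   /* copy immediately; @full is only valid until the
                     * next lookup or change to this TLB */
    return true;
}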
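
[Standalone illustration for the uint32_t widening above, not code from the series: the helper below is hypothetical and only shows the convention of passing sub-word values in a full-width type, so that no callee depends on the caller having cleaned the high bits of a narrow uint8_t/uint16_t argument register.]

#include <stdint.h>
#include <stdio.h>

/* Before: void store_b(uint8_t val) -- on ABIs that expect the caller
 * to zero-extend sub-word arguments, every call site must guarantee
 * the high bits are clean. Widening the prototype makes the full
 * 32-bit register value part of the C contract instead. */
static void store_b(uint32_t val)
{
    printf("store byte 0x%02x\n", (unsigned)(val & 0xff)); /* truncate at use */
}

int main(void)
{
    uint32_t wide = 0xdeadbeef;
    store_b(wide & 0xff);   /* caller masks/extends explicitly */
    return 0;
}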