The following changes since commit 470dd6bd360782f5137f7e3376af6a44658eb1d3:

  Merge remote-tracking branch 'remotes/stsquad/tags/pull-testing-060121-4' into staging (2021-01-06 22:18:36 +0000)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20210107

for you to fetch changes up to e5e2e4c73926f6f3c1f5da24a350e4345d5ad232:

  tcg: Constify TCGLabelQemuLdst.raddr (2021-01-07 05:09:42 -1000)

----------------------------------------------------------------
Build fix for ppc64 centos7.
Reduce the use of scratch registers for tcg/i386.
Use _aligned_malloc for Win32.
Enable split w^x code gen buffers.

----------------------------------------------------------------
Philippe Mathieu-Daudé (1):
      util/oslib: Assert qemu_try_memalign() alignment is a power of 2

Richard Henderson (46):
      linux-user: Conditionalize TUNSETVNETLE
      tcg/i386: Adjust TCG_TARGET_HAS_MEMORY_BSWAP
      tcg: Introduce INDEX_op_qemu_st8_i32
      util/oslib-win32: Use _aligned_malloc for qemu_try_memalign
      tcg: Do not flush icache for interpreter
      util: Enhance flush_icache_range with separate data pointer
      util: Specialize flush_idcache_range for aarch64
      tcg: Move tcg prologue pointer out of TCGContext
      tcg: Move tcg epilogue pointer out of TCGContext
      tcg: Add in_code_gen_buffer
      tcg: Introduce tcg_splitwx_to_{rx,rw}
      tcg: Adjust TCGLabel for const
      tcg: Adjust tcg_out_call for const
      tcg: Adjust tcg_out_label for const
      tcg: Adjust tcg_register_jit for const
      tcg: Adjust tb_target_set_jmp_target for split-wx
      tcg: Make DisasContextBase.tb const
      tcg: Make tb arg to synchronize_from_tb const
      tcg: Use Error with alloc_code_gen_buffer
      tcg: Add --accel tcg,split-wx property
      accel/tcg: Support split-wx for linux with memfd
      accel/tcg: Support split-wx for darwin/iOS with vm_remap
      tcg: Return the TB pointer from the rx region from exit_tb
      tcg/i386: Support split-wx code generation
      tcg/aarch64: Use B not BL for tcg_out_goto_long
      tcg/aarch64: Support split-wx code generation
      disas: Push const down through host disassembly
      tcg/tci: Push const down through bytecode reading
      tcg: Introduce tcg_tbrel_diff
      tcg/ppc: Use tcg_tbrel_diff
      tcg/ppc: Use tcg_out_mem_long to reset TCG_REG_TB
      tcg/ppc: Support split-wx code generation
      tcg/sparc: Use tcg_tbrel_diff
      tcg/sparc: Support split-wx code generation
      tcg/s390: Use tcg_tbrel_diff
      tcg/s390: Support split-wx code generation
      tcg/riscv: Fix branch range checks
      tcg/riscv: Remove branch-over-branch fallback
      tcg/riscv: Support split-wx code generation
      accel/tcg: Add mips support to alloc_code_gen_buffer_splitwx_memfd
      tcg/mips: Do not assert on relocation overflow
      tcg/mips: Support split-wx code generation
      tcg/arm: Support split-wx code generation
      tcg: Remove TCG_TARGET_SUPPORT_MIRROR
      tcg: Constify tcg_code_gen_epilogue
      tcg: Constify TCGLabelQemuLdst.raddr

 accel/tcg/tcg-runtime.h | 2 +-
 include/disas/dis-asm.h | 4 +-
 include/disas/disas.h | 2 +-
 include/exec/exec-all.h | 2 +-
 include/exec/gen-icount.h | 4 +-
 include/exec/log.h | 2 +-
 include/exec/translator.h | 2 +-
 include/hw/core/cpu.h | 3 +-
 include/qemu/cacheflush.h | 15 ++-
 include/sysemu/tcg.h | 3 +-
 include/tcg/tcg-op.h | 2 +-
 include/tcg/tcg-opc.h | 5 +
 include/tcg/tcg.h | 61 +++++++--
 linux-user/ioctls.h | 2 +
 tcg/aarch64/tcg-target.h | 3 +-
 tcg/arm/tcg-target.h | 3 +-
 tcg/i386/tcg-target.h | 12 +-
 tcg/mips/tcg-target.h | 3 +-
 tcg/ppc/tcg-target.h | 3 +-
 tcg/riscv/tcg-target.h | 3 +-
 tcg/s390/tcg-target.h | 9 +-
 tcg/sparc/tcg-target.h | 3 +-
 tcg/tci/tcg-target.h | 7 +-
 accel/tcg/cpu-exec.c | 41 +++---
 accel/tcg/tcg-all.c | 26 +++-
 accel/tcg/tcg-runtime.c | 4 +-
 accel/tcg/translate-all.c | 311 ++++++++++++++++++++++++++++++++++---
 accel/tcg/translator.c | 4 +-
 bsd-user/main.c | 2 +-
 disas.c | 2 +-
 disas/capstone.c | 2 +-
 linux-user/main.c | 2 +-
 softmmu/physmem.c | 2 +-
 target/arm/cpu.c | 3 +-
 target/arm/translate-a64.c | 2 +-
 target/avr/cpu.c | 3 +-
 target/hppa/cpu.c | 3 +-
 target/i386/tcg/tcg-cpu.c | 3 +-
 target/microblaze/cpu.c | 3 +-
 target/mips/cpu.c | 3 +-
 target/riscv/cpu.c | 3 +-
 target/rx/cpu.c | 3 +-
 target/sh4/cpu.c | 3 +-
 target/sparc/cpu.c | 3 +-
 target/tricore/cpu.c | 2 +-
 tcg/optimize.c | 1 +
 tcg/tcg-op.c | 21 ++-
 tcg/tcg.c | 94 ++++++++++---
 tcg/tci.c | 62 +++++----
 util/cacheflush.c | 107 ++++++++++++---
 util/cacheinfo.c | 8 +-
 util/oslib-posix.c | 2 +
 util/oslib-win32.c | 12 +-
 tcg/aarch64/tcg-target.c.inc | 75 ++++++-----
 tcg/arm/tcg-target.c.inc | 41 +++---
 tcg/i386/tcg-target.c.inc | 174 +++++++++++-------------
 tcg/mips/tcg-target.c.inc | 97 ++++++--------
 tcg/ppc/tcg-target.c.inc | 88 ++++++------
 tcg/riscv/tcg-target.c.inc | 125 ++++++-----------
 tcg/s390/tcg-target.c.inc | 91 ++++++-------
 tcg/sparc/tcg-target.c.inc | 58 ++++----
 tcg/tcg-ldst.c.inc | 2 +-
 tcg/tcg-pool.c.inc | 6 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 accel/tcg/trace-events | 2 +-
 qemu-options.hx | 7 +
 tcg/README | 5 +
 67 files changed, 1035 insertions(+), 630 deletions(-)

v3: One more try to fix macos issues.

r~

The following changes since commit e0209297cddd5e10a07e15fac5cca7aa1a8e0e59:

  Merge tag 'pull-ufs-20250217' of https://gitlab.com/jeuk20.kim/qemu into staging (2025-02-18 10:58:48 +0800)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20250215-3

for you to fetch changes up to e726f65867087d86436de05e9f372a86ec1381a6:

  tcg: Remove TCG_TARGET_HAS_{br,set}cond2 from riscv and loongarch64 (2025-02-18 08:29:03 -0800)

----------------------------------------------------------------
tcg: Remove last traces of TCG_TARGET_NEED_POOL_LABELS
tcg: Cleanups after disallowing 64-on-32
tcg: Introduce constraint for zero register
tcg: Remove TCG_TARGET_HAS_{br,set}cond2 from riscv and loongarch64
tcg/i386: Use tcg_{high,unsigned}_cond in tcg_out_brcond2
linux-user: Move TARGET_SA_RESTORER out of generic/signal.h
linux-user: Fix alignment when unmapping excess reservation
target/sparc: Fix register selection for all F*TOx and FxTO* instructions
target/sparc: Fix gdbstub incorrectly handling registers f32-f62
target/sparc: fake UltraSPARC T1 PCR and PIC registers

----------------------------------------------------------------
Andreas Schwab (1):
      linux-user: Move TARGET_SA_RESTORER out of generic/signal.h

Artyom Tarasenko (1):
      target/sparc: fake UltraSPARC T1 PCR and PIC registers

Fabiano Rosas (1):
      elfload: Fix alignment when unmapping excess reservation

Mikael Szreder (2):
      target/sparc: Fix register selection for all F*TOx and FxTO* instructions
      target/sparc: Fix gdbstub incorrectly handling registers f32-f62

Richard Henderson (23):
      tcg: Remove last traces of TCG_TARGET_NEED_POOL_LABELS
      tcg: Remove TCG_OVERSIZED_GUEST
      tcg: Drop support for two address registers in gen_ldst
      tcg: Merge INDEX_op_qemu_*_{a32,a64}_*
      tcg/arm: Drop addrhi from prepare_host_addr
      tcg/i386: Drop addrhi from prepare_host_addr
      tcg/mips: Drop addrhi from prepare_host_addr
      tcg/ppc: Drop addrhi from prepare_host_addr
      tcg: Replace addr{lo,hi}_reg with addr_reg in TCGLabelQemuLdst
      plugins: Fix qemu_plugin_read_memory_vaddr parameters
      accel/tcg: Fix tlb_set_page_with_attrs, tlb_set_page
      target/loongarch: Use VADDR_PRIx for logging pc_next
      target/mips: Use VADDR_PRIx for logging pc_next
      include/exec: Change vaddr to uintptr_t
      include/exec: Use uintptr_t in CPUTLBEntry
      tcg: Introduce the 'z' constraint for a hardware zero register
      tcg/aarch64: Use 'z' constraint
      tcg/loongarch64: Use 'z' constraint
      tcg/mips: Use 'z' constraint
      tcg/riscv: Use 'z' constraint
      tcg/sparc64: Use 'z' constraint
      tcg/i386: Use tcg_{high,unsigned}_cond in tcg_out_brcond2
      tcg: Remove TCG_TARGET_HAS_{br,set}cond2 from riscv and loongarch64

 include/exec/tlb-common.h | 10 +-
 include/exec/vaddr.h | 16 +-
 include/qemu/atomic.h | 18 +-
 include/tcg/oversized-guest.h | 23 ---
 include/tcg/tcg-opc.h | 28 +--
 include/tcg/tcg.h | 3 +-
 linux-user/aarch64/target_signal.h | 2 +
 linux-user/arm/target_signal.h | 2 +
 linux-user/generic/signal.h | 1 -
 linux-user/i386/target_signal.h | 2 +
 linux-user/m68k/target_signal.h | 1 +
 linux-user/microblaze/target_signal.h | 2 +
 linux-user/ppc/target_signal.h | 2 +
 linux-user/s390x/target_signal.h | 2 +
 linux-user/sh4/target_signal.h | 2 +
 linux-user/x86_64/target_signal.h | 2 +
 linux-user/xtensa/target_signal.h | 2 +
 tcg/aarch64/tcg-target-con-set.h | 12 +-
 tcg/aarch64/tcg-target.h | 2 +
 tcg/loongarch64/tcg-target-con-set.h | 15 +-
 tcg/loongarch64/tcg-target-con-str.h | 1 -
 tcg/loongarch64/tcg-target-has.h | 2 -
 tcg/loongarch64/tcg-target.h | 2 +
 tcg/mips/tcg-target-con-set.h | 26 +--
 tcg/mips/tcg-target-con-str.h | 1 -
 tcg/mips/tcg-target.h | 2 +
 tcg/riscv/tcg-target-con-set.h | 10 +-
 tcg/riscv/tcg-target-con-str.h | 1 -
 tcg/riscv/tcg-target-has.h | 2 -
 tcg/riscv/tcg-target.h | 2 +
 tcg/sparc64/tcg-target-con-set.h | 12 +-
 tcg/sparc64/tcg-target-con-str.h | 1 -
 tcg/sparc64/tcg-target.h | 3 +-
 tcg/tci/tcg-target.h | 1 -
 accel/tcg/cputlb.c | 32 +---
 accel/tcg/tcg-all.c | 9 +-
 linux-user/elfload.c | 4 +-
 plugins/api.c | 2 +-
 target/arm/ptw.c | 34 ----
 target/loongarch/tcg/translate.c | 2 +-
 target/mips/tcg/octeon_translate.c | 4 +-
 target/riscv/cpu_helper.c | 13 +-
 target/sparc/gdbstub.c | 18 +-
 target/sparc/translate.c | 19 +++
 tcg/optimize.c | 21 +--
 tcg/tcg-op-ldst.c | 103 +++--------
 tcg/tcg.c | 97 +++++------
 tcg/tci.c | 119 +++----------
 docs/devel/multi-thread-tcg.rst | 1 -
 docs/devel/tcg-ops.rst | 4 +-
 target/loongarch/tcg/insn_trans/trans_atomic.c.inc | 2 +-
 target/sparc/insns.decode | 19 ++-
 tcg/aarch64/tcg-target.c.inc | 86 ++++------
 tcg/arm/tcg-target.c.inc | 114 ++++---------
 tcg/i386/tcg-target.c.inc | 190 +++++----------------
 tcg/loongarch64/tcg-target.c.inc | 72 +++-----
 tcg/mips/tcg-target.c.inc | 169 ++++++------------
 tcg/ppc/tcg-target.c.inc | 164 +++++-------------
 tcg/riscv/tcg-target.c.inc | 56 +++---
 tcg/s390x/tcg-target.c.inc | 40 ++---
 tcg/sparc64/tcg-target.c.inc | 45 ++---
 tcg/tci/tcg-target.c.inc | 60 ++-----
 62 files changed, 550 insertions(+), 1162 deletions(-)
 delete mode 100644 include/tcg/oversized-guest.h
Deleted patch
This fixes the build for older ppc64 kernel headers.

Fixes: 6addf06a3c4
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/ioctls.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/ioctls.h
+++ b/linux-user/ioctls.h
@@ -XXX,XX +XXX,XX @@
     IOCTL(TUNSETQUEUE, IOC_W, MK_PTR(MK_STRUCT(STRUCT_short_ifreq)))
     IOCTL(TUNSETIFINDEX , IOC_W, MK_PTR(TYPE_INT))
     /* TUNGETFILTER is not supported: see TUNATTACHFILTER. */
+#ifdef TUNSETVNETLE
     IOCTL(TUNSETVNETLE, IOC_W, MK_PTR(TYPE_INT))
     IOCTL(TUNGETVNETLE, IOC_R, MK_PTR(TYPE_INT))
+#endif
 #ifdef TUNSETVNETBE
     IOCTL(TUNSETVNETBE, IOC_W, MK_PTR(TYPE_INT))
     IOCTL(TUNGETVNETBE, IOC_R, MK_PTR(TYPE_INT))
--
2.25.1
Deleted patch
Always true when movbe is available, otherwise leave
this to generic code.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/i386/tcg-target.h | 3 +-
 tcg/i386/tcg-target.c.inc | 119 ++++++++++++++------------------------
 2 files changed, 47 insertions(+), 75 deletions(-)
10
11
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target.h
14
+++ b/tcg/i386/tcg-target.h
15
@@ -XXX,XX +XXX,XX @@ extern bool have_bmi1;
16
extern bool have_popcnt;
17
extern bool have_avx1;
18
extern bool have_avx2;
19
+extern bool have_movbe;
20
21
/* optional instructions */
22
#define TCG_TARGET_HAS_div2_i32 1
23
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
24
25
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
26
27
-#define TCG_TARGET_HAS_MEMORY_BSWAP 1
28
+#define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe
29
30
#ifdef CONFIG_SOFTMMU
31
#define TCG_TARGET_NEED_LDST_LABELS
32
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/i386/tcg-target.c.inc
35
+++ b/tcg/i386/tcg-target.c.inc
36
@@ -XXX,XX +XXX,XX @@ bool have_bmi1;
37
bool have_popcnt;
38
bool have_avx1;
39
bool have_avx2;
40
+bool have_movbe;
41
42
#ifdef CONFIG_CPUID_H
43
-static bool have_movbe;
44
static bool have_bmi2;
45
static bool have_lzcnt;
46
#else
47
-# define have_movbe 0
48
# define have_bmi2 0
49
# define have_lzcnt 0
50
#endif
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
52
TCGReg base, int index, intptr_t ofs,
53
int seg, bool is64, MemOp memop)
54
{
55
- const MemOp real_bswap = memop & MO_BSWAP;
56
- MemOp bswap = real_bswap;
57
+ bool use_movbe = false;
58
int rexw = is64 * P_REXW;
59
int movop = OPC_MOVL_GvEv;
60
61
- if (have_movbe && real_bswap) {
62
- bswap = 0;
63
+ /* Do big-endian loads with movbe. */
64
+ if (memop & MO_BSWAP) {
65
+ tcg_debug_assert(have_movbe);
66
+ use_movbe = true;
67
movop = OPC_MOVBE_GyMy;
68
}
69
70
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
71
base, index, 0, ofs);
72
break;
73
case MO_UW:
74
- tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
75
- base, index, 0, ofs);
76
- if (real_bswap) {
77
- tcg_out_rolw_8(s, datalo);
78
- }
79
- break;
80
- case MO_SW:
81
- if (real_bswap) {
82
- if (have_movbe) {
83
+ if (use_movbe) {
84
+ /* There is no extending movbe; only low 16-bits are modified. */
85
+ if (datalo != base && datalo != index) {
86
+ /* XOR breaks dependency chains. */
87
+ tgen_arithr(s, ARITH_XOR, datalo, datalo);
88
tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
89
datalo, base, index, 0, ofs);
90
} else {
91
- tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
92
- base, index, 0, ofs);
93
- tcg_out_rolw_8(s, datalo);
94
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
95
+ datalo, base, index, 0, ofs);
96
+ tcg_out_ext16u(s, datalo, datalo);
97
}
98
- tcg_out_modrm(s, OPC_MOVSWL + rexw, datalo, datalo);
99
+ } else {
100
+ tcg_out_modrm_sib_offset(s, OPC_MOVZWL + seg, datalo,
101
+ base, index, 0, ofs);
102
+ }
103
+ break;
104
+ case MO_SW:
105
+ if (use_movbe) {
106
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + P_DATA16 + seg,
107
+ datalo, base, index, 0, ofs);
108
+ tcg_out_ext16s(s, datalo, datalo, rexw);
109
} else {
110
tcg_out_modrm_sib_offset(s, OPC_MOVSWL + rexw + seg,
111
datalo, base, index, 0, ofs);
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
113
break;
114
case MO_UL:
115
tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
116
- if (bswap) {
117
- tcg_out_bswap32(s, datalo);
118
- }
119
break;
120
#if TCG_TARGET_REG_BITS == 64
121
case MO_SL:
122
- if (real_bswap) {
123
- tcg_out_modrm_sib_offset(s, movop + seg, datalo,
124
+ if (use_movbe) {
125
+ tcg_out_modrm_sib_offset(s, OPC_MOVBE_GyMy + seg, datalo,
126
base, index, 0, ofs);
127
- if (bswap) {
128
- tcg_out_bswap32(s, datalo);
129
- }
130
tcg_out_ext32s(s, datalo, datalo);
131
} else {
132
tcg_out_modrm_sib_offset(s, OPC_MOVSLQ + seg, datalo,
133
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
134
if (TCG_TARGET_REG_BITS == 64) {
135
tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
136
base, index, 0, ofs);
137
- if (bswap) {
138
- tcg_out_bswap64(s, datalo);
139
- }
140
} else {
141
- if (real_bswap) {
142
- int t = datalo;
143
+ if (use_movbe) {
144
+ TCGReg t = datalo;
145
datalo = datahi;
146
datahi = t;
147
}
148
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
149
tcg_out_modrm_sib_offset(s, movop + seg, datalo,
150
base, index, 0, ofs);
151
}
152
- if (bswap) {
153
- tcg_out_bswap32(s, datalo);
154
- tcg_out_bswap32(s, datahi);
155
- }
156
}
157
break;
158
default:
159
- tcg_abort();
160
+ g_assert_not_reached();
161
}
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
165
TCGReg base, int index, intptr_t ofs,
166
int seg, MemOp memop)
167
{
168
- /* ??? Ideally we wouldn't need a scratch register. For user-only,
169
- we could perform the bswap twice to restore the original value
170
- instead of moving to the scratch. But as it is, the L constraint
171
- means that TCG_REG_L0 is definitely free here. */
172
const TCGReg scratch = TCG_REG_L0;
173
- const MemOp real_bswap = memop & MO_BSWAP;
174
- MemOp bswap = real_bswap;
175
+ bool use_movbe = false;
176
int movop = OPC_MOVL_EvGv;
177
178
- if (have_movbe && real_bswap) {
179
- bswap = 0;
180
+ /*
181
+ * Do big-endian stores with movbe or softmmu.
182
+ * User-only without movbe will have its swapping done generically.
183
+ */
184
+ if (memop & MO_BSWAP) {
185
+ tcg_debug_assert(have_movbe);
186
+ use_movbe = true;
187
movop = OPC_MOVBE_MyGy;
188
}
189
190
switch (memop & MO_SIZE) {
191
case MO_8:
192
- /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
193
- Use the scratch register if necessary. */
194
+ /*
195
+ * In 32-bit mode, 8-bit stores can only happen from [abcd]x.
196
+ * TODO: Adjust constraints such that this is is forced,
197
+ * then we won't need a scratch at all for user-only.
198
+ */
199
if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
200
tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
201
datalo = scratch;
202
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
203
datalo, base, index, 0, ofs);
204
break;
205
case MO_16:
206
- if (bswap) {
207
- tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
208
- tcg_out_rolw_8(s, scratch);
209
- datalo = scratch;
210
- }
211
tcg_out_modrm_sib_offset(s, movop + P_DATA16 + seg, datalo,
212
base, index, 0, ofs);
213
break;
214
case MO_32:
215
- if (bswap) {
216
- tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
217
- tcg_out_bswap32(s, scratch);
218
- datalo = scratch;
219
- }
220
tcg_out_modrm_sib_offset(s, movop + seg, datalo, base, index, 0, ofs);
221
break;
222
case MO_64:
223
if (TCG_TARGET_REG_BITS == 64) {
224
- if (bswap) {
225
- tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
226
- tcg_out_bswap64(s, scratch);
227
- datalo = scratch;
228
- }
229
tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
230
base, index, 0, ofs);
231
- } else if (bswap) {
232
- tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
233
- tcg_out_bswap32(s, scratch);
234
- tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch,
235
- base, index, 0, ofs);
236
- tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
237
- tcg_out_bswap32(s, scratch);
238
- tcg_out_modrm_sib_offset(s, OPC_MOVL_EvGv + seg, scratch,
239
- base, index, 0, ofs + 4);
240
} else {
241
- if (real_bswap) {
242
- int t = datalo;
243
+ if (use_movbe) {
244
+ TCGReg t = datalo;
245
datalo = datahi;
246
datahi = t;
247
}
248
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
249
}
250
break;
251
default:
252
- tcg_abort();
253
+ g_assert_not_reached();
254
}
255
}
256
257
--
258
2.25.1
259
260
Deleted patch
Enable this on i386 to restrict the set of input registers
for an 8-bit store, as required by the architecture. This
removes the last use of scratch registers for user-only mode.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h | 5 +++++
 tcg/aarch64/tcg-target.h | 1 +
 tcg/arm/tcg-target.h | 1 +
 tcg/i386/tcg-target.h | 3 +++
 tcg/mips/tcg-target.h | 1 +
 tcg/ppc/tcg-target.h | 1 +
 tcg/riscv/tcg-target.h | 1 +
 tcg/s390/tcg-target.h | 1 +
 tcg/sparc/tcg-target.h | 1 +
 tcg/tci/tcg-target.h | 1 +
 tcg/optimize.c | 1 +
 tcg/tcg-op.c | 6 +++++-
 tcg/tcg.c | 4 ++++
 tcg/i386/tcg-target.c.inc | 29 ++++++++++++++++++-----------
 tcg/README | 5 +++++
 15 files changed, 49 insertions(+), 12 deletions(-)
24
25
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/include/tcg/tcg-opc.h
28
+++ b/include/tcg/tcg-opc.h
29
@@ -XXX,XX +XXX,XX @@ DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
30
DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
31
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
32
33
+/* Only used by i386 to cope with stupid register constraints. */
34
+DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1,
35
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
36
+ IMPL(TCG_TARGET_HAS_qemu_st8_i32))
37
+
38
/* Host vector support. */
39
40
#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
41
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/tcg/aarch64/tcg-target.h
44
+++ b/tcg/aarch64/tcg-target.h
45
@@ -XXX,XX +XXX,XX @@ typedef enum {
46
#define TCG_TARGET_HAS_extrl_i64_i32 0
47
#define TCG_TARGET_HAS_extrh_i64_i32 0
48
#define TCG_TARGET_HAS_goto_ptr 1
49
+#define TCG_TARGET_HAS_qemu_st8_i32 0
50
51
#define TCG_TARGET_HAS_div_i64 1
52
#define TCG_TARGET_HAS_rem_i64 1
53
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/arm/tcg-target.h
56
+++ b/tcg/arm/tcg-target.h
57
@@ -XXX,XX +XXX,XX @@ extern bool use_idiv_instructions;
58
#define TCG_TARGET_HAS_rem_i32 0
59
#define TCG_TARGET_HAS_goto_ptr 1
60
#define TCG_TARGET_HAS_direct_jump 0
61
+#define TCG_TARGET_HAS_qemu_st8_i32 0
62
63
enum {
64
TCG_AREG0 = TCG_REG_R6,
65
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
66
index XXXXXXX..XXXXXXX 100644
67
--- a/tcg/i386/tcg-target.h
68
+++ b/tcg/i386/tcg-target.h
69
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
70
#define TCG_TARGET_HAS_muls2_i64 1
71
#define TCG_TARGET_HAS_muluh_i64 0
72
#define TCG_TARGET_HAS_mulsh_i64 0
73
+#define TCG_TARGET_HAS_qemu_st8_i32 0
74
+#else
75
+#define TCG_TARGET_HAS_qemu_st8_i32 1
76
#endif
77
78
/* We do not support older SSE systems, only beginning with AVX1. */
79
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
80
index XXXXXXX..XXXXXXX 100644
81
--- a/tcg/mips/tcg-target.h
82
+++ b/tcg/mips/tcg-target.h
83
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
84
#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
85
#define TCG_TARGET_HAS_ctz_i32 0
86
#define TCG_TARGET_HAS_ctpop_i32 0
87
+#define TCG_TARGET_HAS_qemu_st8_i32 0
88
89
#if TCG_TARGET_REG_BITS == 64
90
#define TCG_TARGET_HAS_movcond_i64 use_movnz_instructions
91
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
92
index XXXXXXX..XXXXXXX 100644
93
--- a/tcg/ppc/tcg-target.h
94
+++ b/tcg/ppc/tcg-target.h
95
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
96
#define TCG_TARGET_HAS_mulsh_i32 1
97
#define TCG_TARGET_HAS_goto_ptr 1
98
#define TCG_TARGET_HAS_direct_jump 1
99
+#define TCG_TARGET_HAS_qemu_st8_i32 0
100
101
#if TCG_TARGET_REG_BITS == 64
102
#define TCG_TARGET_HAS_add2_i32 0
103
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/riscv/tcg-target.h
106
+++ b/tcg/riscv/tcg-target.h
107
@@ -XXX,XX +XXX,XX @@ typedef enum {
108
#define TCG_TARGET_HAS_direct_jump 0
109
#define TCG_TARGET_HAS_brcond2 1
110
#define TCG_TARGET_HAS_setcond2 1
111
+#define TCG_TARGET_HAS_qemu_st8_i32 0
112
113
#if TCG_TARGET_REG_BITS == 64
114
#define TCG_TARGET_HAS_movcond_i64 0
115
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
116
index XXXXXXX..XXXXXXX 100644
117
--- a/tcg/s390/tcg-target.h
118
+++ b/tcg/s390/tcg-target.h
119
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities;
120
#define TCG_TARGET_HAS_extrh_i64_i32 0
121
#define TCG_TARGET_HAS_goto_ptr 1
122
#define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT)
123
+#define TCG_TARGET_HAS_qemu_st8_i32 0
124
125
#define TCG_TARGET_HAS_div2_i64 1
126
#define TCG_TARGET_HAS_rot_i64 1
127
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
128
index XXXXXXX..XXXXXXX 100644
129
--- a/tcg/sparc/tcg-target.h
130
+++ b/tcg/sparc/tcg-target.h
131
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
132
#define TCG_TARGET_HAS_mulsh_i32 0
133
#define TCG_TARGET_HAS_goto_ptr 1
134
#define TCG_TARGET_HAS_direct_jump 1
135
+#define TCG_TARGET_HAS_qemu_st8_i32 0
136
137
#define TCG_TARGET_HAS_extrl_i64_i32 1
138
#define TCG_TARGET_HAS_extrh_i64_i32 1
139
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
140
index XXXXXXX..XXXXXXX 100644
141
--- a/tcg/tci/tcg-target.h
142
+++ b/tcg/tci/tcg-target.h
143
@@ -XXX,XX +XXX,XX @@
144
#define TCG_TARGET_HAS_mulsh_i32 0
145
#define TCG_TARGET_HAS_goto_ptr 0
146
#define TCG_TARGET_HAS_direct_jump 1
147
+#define TCG_TARGET_HAS_qemu_st8_i32 0
148
149
#if TCG_TARGET_REG_BITS == 64
150
#define TCG_TARGET_HAS_extrl_i64_i32 0
151
diff --git a/tcg/optimize.c b/tcg/optimize.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/tcg/optimize.c
154
+++ b/tcg/optimize.c
155
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
156
case INDEX_op_qemu_ld_i32:
157
case INDEX_op_qemu_ld_i64:
158
case INDEX_op_qemu_st_i32:
159
+ case INDEX_op_qemu_st8_i32:
160
case INDEX_op_qemu_st_i64:
161
case INDEX_op_call:
162
/* Opcodes that touch guest memory stop the optimization. */
163
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/tcg-op.c
166
+++ b/tcg/tcg-op.c
167
@@ -XXX,XX +XXX,XX @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
168
}
169
170
addr = plugin_prep_mem_callbacks(addr);
171
- gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
172
+ if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
173
+ gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx);
174
+ } else {
175
+ gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
176
+ }
177
plugin_gen_mem_callbacks(addr, info);
178
179
if (swap) {
180
diff --git a/tcg/tcg.c b/tcg/tcg.c
181
index XXXXXXX..XXXXXXX 100644
182
--- a/tcg/tcg.c
183
+++ b/tcg/tcg.c
184
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
185
case INDEX_op_qemu_st_i64:
186
return true;
187
188
+ case INDEX_op_qemu_st8_i32:
189
+ return TCG_TARGET_HAS_qemu_st8_i32;
190
+
191
case INDEX_op_goto_ptr:
192
return TCG_TARGET_HAS_goto_ptr;
193
194
@@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
195
break;
196
case INDEX_op_qemu_ld_i32:
197
case INDEX_op_qemu_st_i32:
198
+ case INDEX_op_qemu_st8_i32:
199
case INDEX_op_qemu_ld_i64:
200
case INDEX_op_qemu_st_i64:
201
{
202
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
203
index XXXXXXX..XXXXXXX 100644
204
--- a/tcg/i386/tcg-target.c.inc
205
+++ b/tcg/i386/tcg-target.c.inc
206
@@ -XXX,XX +XXX,XX @@ static const char *target_parse_constraint(TCGArgConstraint *ct,
207
ct->regs |= ALL_VECTOR_REGS;
208
break;
209
210
- /* qemu_ld/st address constraint */
211
case 'L':
212
+ /* qemu_ld/st data+address constraint */
213
ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
214
+#ifdef CONFIG_SOFTMMU
215
tcg_regset_reset_reg(ct->regs, TCG_REG_L0);
216
tcg_regset_reset_reg(ct->regs, TCG_REG_L1);
217
+#endif
218
+ break;
219
+ case 's':
220
+ /* qemu_st8_i32 data constraint */
221
+ ct->regs = 0xf;
222
+#ifdef CONFIG_SOFTMMU
223
+ tcg_regset_reset_reg(ct->regs, TCG_REG_L0);
224
+ tcg_regset_reset_reg(ct->regs, TCG_REG_L1);
225
+#endif
226
break;
227
228
case 'e':
229
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
230
TCGReg base, int index, intptr_t ofs,
231
int seg, MemOp memop)
232
{
233
- const TCGReg scratch = TCG_REG_L0;
234
bool use_movbe = false;
235
int movop = OPC_MOVL_EvGv;
236
237
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
238
239
switch (memop & MO_SIZE) {
240
case MO_8:
241
- /*
242
- * In 32-bit mode, 8-bit stores can only happen from [abcd]x.
243
- * TODO: Adjust constraints such that this is is forced,
244
- * then we won't need a scratch at all for user-only.
245
- */
246
- if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
247
- tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
248
- datalo = scratch;
249
- }
250
+ /* This is handled with constraints on INDEX_op_qemu_st8_i32. */
251
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || datalo < 4);
252
tcg_out_modrm_sib_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
253
datalo, base, index, 0, ofs);
254
break;
255
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
256
tcg_out_qemu_ld(s, args, 1);
257
break;
258
case INDEX_op_qemu_st_i32:
259
+ case INDEX_op_qemu_st8_i32:
260
tcg_out_qemu_st(s, args, 0);
261
break;
262
case INDEX_op_qemu_st_i64:
263
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
264
static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
265
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
266
static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
267
+ static const TCGTargetOpDef s_L = { .args_ct_str = { "s", "L" } };
268
static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
269
static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
270
static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
271
+ static const TCGTargetOpDef s_L_L = { .args_ct_str = { "s", "L", "L" } };
272
static const TCGTargetOpDef r_r_L_L
273
= { .args_ct_str = { "r", "r", "L", "L" } };
274
static const TCGTargetOpDef L_L_L_L
275
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
276
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
277
case INDEX_op_qemu_st_i32:
278
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
279
+ case INDEX_op_qemu_st8_i32:
280
+ return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &s_L : &s_L_L;
281
case INDEX_op_qemu_ld_i64:
282
return (TCG_TARGET_REG_BITS == 64 ? &r_L
283
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
284
diff --git a/tcg/README b/tcg/README
285
index XXXXXXX..XXXXXXX 100644
286
--- a/tcg/README
287
+++ b/tcg/README
288
@@ -XXX,XX +XXX,XX @@ goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0).
289
290
* qemu_ld_i32/i64 t0, t1, flags, memidx
291
* qemu_st_i32/i64 t0, t1, flags, memidx
292
+* qemu_st8_i32 t0, t1, flags, memidx
293
294
Load data at the guest address t1 into t0, or store data in t0 at guest
295
address t1. The _i32/_i64 size applies to the size of the input/output
296
@@ -XXX,XX +XXX,XX @@ of the memory access.
297
For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
298
64-bit memory access specified in flags.
299
300
+For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of
301
+the memory operation is known to be 8-bit. This allows the backend to
302
+provide a different set of register constraints.
303
+
304
********* Host vector operations
305
306
All of the vector ops have two parameters, TCGOP_VECL & TCGOP_VECE.
307
--
308
2.25.1
309
310
Deleted patch
We do not need or want to be allocating page sized quanta.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Stefan Weil <sw@weilnetz.de>
Message-Id: <20201018164836.1149452-1-richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/oslib-win32.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -XXX,XX +XXX,XX @@
 #include "trace.h"
 #include "qemu/sockets.h"
 #include "qemu/cutils.h"
+#include <malloc.h>

 /* this must come after including "trace.h" */
 #include <shlobj.h>
@@ -XXX,XX +XXX,XX @@ void *qemu_try_memalign(size_t alignment, size_t size)
 {
     void *ptr;

-    if (!size) {
-        abort();
-    }
-    ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
+    g_assert(size != 0);
+    ptr = _aligned_malloc(alignment, size);
     trace_qemu_memalign(alignment, size, ptr);
     return ptr;
 }
@@ -XXX,XX +XXX,XX @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared)
 void qemu_vfree(void *ptr)
 {
     trace_qemu_vfree(ptr);
-    if (ptr) {
-        VirtualFree(ptr, 0, MEM_RELEASE);
-    }
+    _aligned_free(ptr);
 }

 void qemu_anon_ram_free(void *ptr, size_t size)
--
2.25.1
Deleted patch
From: Philippe Mathieu-Daudé <philmd@redhat.com>

qemu_try_memalign() expects a power of 2 alignment:

- posix_memalign(3):

    The address of the allocated memory will be a multiple of alignment,
    which must be a power of two and a multiple of sizeof(void *).

- _aligned_malloc()

    The alignment value, which must be an integer power of 2.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20201021173803.2619054-3-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/oslib-posix.c | 2 ++
 util/oslib-win32.c | 1 +
 2 files changed, 3 insertions(+)

diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -XXX,XX +XXX,XX @@ void *qemu_try_memalign(size_t alignment, size_t size)

     if (alignment < sizeof(void*)) {
         alignment = sizeof(void*);
+    } else {
+        g_assert(is_power_of_2(alignment));
     }

 #if defined(CONFIG_POSIX_MEMALIGN)
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index XXXXXXX..XXXXXXX 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -XXX,XX +XXX,XX @@ void *qemu_try_memalign(size_t alignment, size_t size)
     void *ptr;

     g_assert(size != 0);
+    g_assert(is_power_of_2(alignment));
     ptr = _aligned_malloc(alignment, size);
     trace_qemu_memalign(alignment, size, ptr);
     return ptr;
--
2.25.1
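A quick illustration of the contract these new assertions enforce (the function and values below are only an editorial example, not part of the series; qemu_try_memalign() and qemu_vfree() are the existing QEMU helpers touched by the two patches above):

```c
#include "qemu/osdep.h"

static void alignment_example(void)
{
    /* OK: 64 is a power of two and at least sizeof(void *). */
    void *p = qemu_try_memalign(64, 4096);
    qemu_vfree(p);

    /*
     * Would now trip g_assert(is_power_of_2(alignment)),
     * since 48 is not a power of two:
     *
     *     qemu_try_memalign(48, 4096);
     */
}
```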
Deleted patch
This is currently a no-op within tci/tcg-target.h, but
is about to be moved to a more generic location.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
 #endif

     buf1 = s->code_ptr;
+#ifndef CONFIG_TCG_INTERPRETER
     flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
+#endif

     /* Deduct the prologue from the buffer. */
     prologue_size = tcg_current_code_size(s);
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
         return -2;
     }

+#ifndef CONFIG_TCG_INTERPRETER
     /* flush instruction cache */
     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
+#endif

     return tcg_current_code_size(s);
 }
--
2.25.1
Deleted patch
We are shortly going to have a split rw/rx jit buffer. Depending
on the host, we need to flush the dcache at the rw data pointer and
flush the icache at the rx code pointer.

For now, the two passed pointers are identical, so there is no
effective change in behaviour.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/cacheflush.h | 15 ++++++++++++--
 softmmu/physmem.c | 2 +-
 tcg/tcg.c | 6 ++++--
 util/cacheflush.c | 38 +++++++++++++++++++++---------------
 util/cacheinfo.c | 8 +++++---
 tcg/aarch64/tcg-target.c.inc | 2 +-
 tcg/mips/tcg-target.c.inc | 2 +-
 tcg/ppc/tcg-target.c.inc | 4 ++--
 tcg/sparc/tcg-target.c.inc | 4 ++--
 9 files changed, 51 insertions(+), 30 deletions(-)
22
23
diff --git a/include/qemu/cacheflush.h b/include/qemu/cacheflush.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/qemu/cacheflush.h
26
+++ b/include/qemu/cacheflush.h
27
@@ -XXX,XX +XXX,XX @@
28
#ifndef QEMU_CACHEFLUSH_H
29
#define QEMU_CACHEFLUSH_H
30
31
+/**
32
+ * flush_idcache_range:
33
+ * @rx: instruction address
34
+ * @rw: data address
35
+ * @len: length to flush
36
+ *
37
+ * Flush @len bytes of the data cache at @rw and the icache at @rx
38
+ * to bring them in sync. The two addresses may be different virtual
39
+ * mappings of the same physical page(s).
40
+ */
41
+
42
#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
43
44
-static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
45
+static inline void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
46
{
47
/* icache is coherent and does not require flushing. */
48
}
49
50
#else
51
52
-void flush_icache_range(uintptr_t start, uintptr_t stop);
53
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len);
54
55
#endif
56
57
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
58
index XXXXXXX..XXXXXXX 100644
59
--- a/softmmu/physmem.c
60
+++ b/softmmu/physmem.c
61
@@ -XXX,XX +XXX,XX @@ static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
62
invalidate_and_set_dirty(mr, addr1, l);
63
break;
64
case FLUSH_CACHE:
65
- flush_icache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr + l);
66
+ flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
67
break;
68
}
69
}
70
diff --git a/tcg/tcg.c b/tcg/tcg.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/tcg/tcg.c
73
+++ b/tcg/tcg.c
74
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
75
76
buf1 = s->code_ptr;
77
#ifndef CONFIG_TCG_INTERPRETER
78
- flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
79
+ flush_idcache_range((uintptr_t)buf0, (uintptr_t)buf0,
80
+ tcg_ptr_byte_diff(buf1, buf0));
81
#endif
82
83
/* Deduct the prologue from the buffer. */
84
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
85
86
#ifndef CONFIG_TCG_INTERPRETER
87
/* flush instruction cache */
88
- flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
89
+ flush_idcache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_buf,
90
+ tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
91
#endif
92
93
return tcg_current_code_size(s);
94
diff --git a/util/cacheflush.c b/util/cacheflush.c
95
index XXXXXXX..XXXXXXX 100644
96
--- a/util/cacheflush.c
97
+++ b/util/cacheflush.c
98
@@ -XXX,XX +XXX,XX @@
99
#include <sys/cachectl.h>
100
#endif
101
102
-void flush_icache_range(uintptr_t start, uintptr_t stop)
103
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
104
{
105
- cacheflush((void *)start, stop - start, ICACHE);
106
+ if (rx != rw) {
107
+ cacheflush((void *)rw, len, DCACHE);
108
+ }
109
+ cacheflush((void *)rx, len, ICACHE);
110
}
111
112
#elif defined(__powerpc__)
113
114
-void flush_icache_range(uintptr_t start, uintptr_t stop)
115
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
116
{
117
- uintptr_t p, start1, stop1;
118
+ uintptr_t p, b, e;
119
size_t dsize = qemu_dcache_linesize;
120
size_t isize = qemu_icache_linesize;
121
122
- start1 = start & ~(dsize - 1);
123
- stop1 = (stop + dsize - 1) & ~(dsize - 1);
124
- for (p = start1; p < stop1; p += dsize) {
125
+ b = rw & ~(dsize - 1);
126
+ e = (rw + len + dsize - 1) & ~(dsize - 1);
127
+ for (p = b; p < e; p += dsize) {
128
asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
129
}
130
asm volatile ("sync" : : : "memory");
131
132
- start &= start & ~(isize - 1);
133
- stop1 = (stop + isize - 1) & ~(isize - 1);
134
- for (p = start1; p < stop1; p += isize) {
135
+ b = rx & ~(isize - 1);
136
+ e = (rx + len + isize - 1) & ~(isize - 1);
137
+ for (p = b; p < e; p += isize) {
138
asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
139
}
140
asm volatile ("sync" : : : "memory");
141
@@ -XXX,XX +XXX,XX @@ void flush_icache_range(uintptr_t start, uintptr_t stop)
142
143
#elif defined(__sparc__)
144
145
-void flush_icache_range(uintptr_t start, uintptr_t stop)
146
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
147
{
148
- uintptr_t p;
149
-
150
- for (p = start & -8; p < ((stop + 7) & -8); p += 8) {
151
+ /* No additional data flush to the RW virtual address required. */
152
+ uintptr_t p, end = (rx + len + 7) & -8;
153
+ for (p = rx & -8; p < end; p += 8) {
154
__asm__ __volatile__("flush\t%0" : : "r" (p));
155
}
156
}
157
158
#else
159
160
-void flush_icache_range(uintptr_t start, uintptr_t stop)
161
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
162
{
163
- __builtin___clear_cache((char *)start, (char *)stop);
164
+ if (rw != rx) {
165
+ __builtin___clear_cache((char *)rw, (char *)rw + len);
166
+ }
167
+ __builtin___clear_cache((char *)rx, (char *)rx + len);
168
}
169
170
#endif
171
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
172
index XXXXXXX..XXXXXXX 100644
173
--- a/util/cacheinfo.c
174
+++ b/util/cacheinfo.c
175
@@ -XXX,XX +XXX,XX @@ static void fallback_cache_info(int *isize, int *dsize)
176
*isize = *dsize;
177
} else {
178
#if defined(_ARCH_PPC)
179
- /* For PPC, we're going to use the icache size computed for
180
- flush_icache_range. Which means that we must use the
181
- architecture minimum. */
182
+ /*
183
+ * For PPC, we're going to use the cache sizes computed for
184
+ * flush_idcache_range. Which means that we must use the
185
+ * architecture minimum.
186
+ */
187
*isize = *dsize = 16;
188
#else
189
/* Otherwise, 64 bytes is not uncommon. */
190
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
191
index XXXXXXX..XXXXXXX 100644
192
--- a/tcg/aarch64/tcg-target.c.inc
193
+++ b/tcg/aarch64/tcg-target.c.inc
194
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
195
}
196
pair = (uint64_t)i2 << 32 | i1;
197
qatomic_set((uint64_t *)jmp_addr, pair);
198
- flush_icache_range(jmp_addr, jmp_addr + 8);
199
+ flush_idcache_range(jmp_addr, jmp_addr, 8);
200
}
201
202
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
203
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
204
index XXXXXXX..XXXXXXX 100644
205
--- a/tcg/mips/tcg-target.c.inc
206
+++ b/tcg/mips/tcg-target.c.inc
207
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
208
uintptr_t addr)
209
{
210
qatomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
211
- flush_icache_range(jmp_addr, jmp_addr + 4);
212
+ flush_idcache_range(jmp_addr, jmp_addr, 4);
213
}
214
215
typedef struct {
216
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
217
index XXXXXXX..XXXXXXX 100644
218
--- a/tcg/ppc/tcg-target.c.inc
219
+++ b/tcg/ppc/tcg-target.c.inc
220
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
221
/* As per the enclosing if, this is ppc64. Avoid the _Static_assert
222
within qatomic_set that would fail to build a ppc32 host. */
223
qatomic_set__nocheck((uint64_t *)jmp_addr, pair);
224
- flush_icache_range(jmp_addr, jmp_addr + 8);
225
+ flush_idcache_range(jmp_addr, jmp_addr, 8);
226
} else {
227
intptr_t diff = addr - jmp_addr;
228
tcg_debug_assert(in_range_b(diff));
229
qatomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
230
- flush_icache_range(jmp_addr, jmp_addr + 4);
231
+ flush_idcache_range(jmp_addr, jmp_addr, 4);
232
}
233
}
234
235
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
236
index XXXXXXX..XXXXXXX 100644
237
--- a/tcg/sparc/tcg-target.c.inc
238
+++ b/tcg/sparc/tcg-target.c.inc
239
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
240
if (!USE_REG_TB) {
241
qatomic_set((uint32_t *)jmp_addr,
242
         deposit32(CALL, 0, 30, br_disp >> 2));
243
- flush_icache_range(jmp_addr, jmp_addr + 4);
244
+ flush_idcache_range(jmp_addr, jmp_addr, 4);
245
return;
246
}
247
248
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
249
}
250
251
qatomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
252
- flush_icache_range(jmp_addr, jmp_addr + 8);
253
+ flush_idcache_range(jmp_addr, jmp_addr, 8);
254
}
255
--
256
2.25.1
257
258
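To make the split rw/rx contract above concrete: with a dual-mapped JIT buffer, generated code is written through the read-write view and executed through the read-execute view, and flush_idcache_range() takes both addresses so it can clean the dcache at one and invalidate the icache at the other. The sketch below is an editorial illustration only, not code from the series; splitwx_diff and install_code() are hypothetical names, and only the flush_idcache_range(rx, rw, len) signature comes from the patch above.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "qemu/cacheflush.h"   /* flush_idcache_range(rx, rw, len) */

/* Hypothetical: offset from the RW mapping to the RX mapping of the buffer. */
static intptr_t splitwx_diff;

/*
 * Copy generated code into the buffer through its writable mapping, then
 * bring the data and instruction caches in sync so the same bytes can be
 * executed through the executable mapping.
 */
static void *install_code(void *rw_ptr, const void *code, size_t len)
{
    uintptr_t rw = (uintptr_t)rw_ptr;
    uintptr_t rx = rw + splitwx_diff;   /* same bytes, seen via the RX view */

    memcpy(rw_ptr, code, len);
    flush_idcache_range(rx, rw, len);   /* dcache at rw, icache at rx */

    return (void *)rx;                  /* callers jump via the RX alias */
}
```

When the two mappings coincide (splitwx_diff == 0), this degenerates to the "two identical pointers" case the commit message describes as the current behaviour.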
Deleted patch
For darwin, the CTR_EL0 register is not accessible, but there
are system routines that we can use.

For other hosts, copy the single pointer implementation from
libgcc and modify it to support the double pointer interface
we require. This halves the number of cache operations required
when split-rwx is enabled.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/cacheflush.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
14
15
diff --git a/util/cacheflush.c b/util/cacheflush.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/util/cacheflush.c
18
+++ b/util/cacheflush.c
19
@@ -XXX,XX +XXX,XX @@
20
21
#include "qemu/osdep.h"
22
#include "qemu/cacheflush.h"
23
+#include "qemu/bitops.h"
24
25
26
#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
27
28
/* Caches are coherent and do not require flushing; symbol inline. */
29
30
+#elif defined(__aarch64__)
31
+
32
+#ifdef CONFIG_DARWIN
33
+/* Apple does not expose CTR_EL0, so we must use system interfaces. */
34
+extern void sys_icache_invalidate(void *start, size_t len);
35
+extern void sys_dcache_flush(void *start, size_t len);
36
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
37
+{
38
+ sys_dcache_flush((void *)rw, len);
39
+ sys_icache_invalidate((void *)rx, len);
40
+}
41
+#else
42
+
43
+/*
44
+ * TODO: unify this with cacheinfo.c.
45
+ * We want to save the whole contents of CTR_EL0, so that we
46
+ * have more than the linesize, but also IDC and DIC.
47
+ */
48
+static unsigned int save_ctr_el0;
49
+static void __attribute__((constructor)) init_ctr_el0(void)
50
+{
51
+ asm volatile("mrs\t%0, ctr_el0" : "=r"(save_ctr_el0));
52
+}
53
+
54
+/*
55
+ * This is a copy of gcc's __aarch64_sync_cache_range, modified
56
+ * to fit this three-operand interface.
57
+ */
58
+void flush_idcache_range(uintptr_t rx, uintptr_t rw, size_t len)
59
+{
60
+ const unsigned CTR_IDC = 1u << 28;
61
+ const unsigned CTR_DIC = 1u << 29;
62
+ const unsigned int ctr_el0 = save_ctr_el0;
63
+ const uintptr_t icache_lsize = 4 << extract32(ctr_el0, 0, 4);
64
+ const uintptr_t dcache_lsize = 4 << extract32(ctr_el0, 16, 4);
65
+ uintptr_t p;
66
+
67
+ /*
68
+ * If CTR_EL0.IDC is enabled, Data cache clean to the Point of Unification
69
+ * is not required for instruction to data coherence.
70
+ */
71
+ if (!(ctr_el0 & CTR_IDC)) {
72
+ /*
73
+ * Loop over the address range, clearing one cache line at once.
74
+ * Data cache must be flushed to unification first to make sure
75
+ * the instruction cache fetches the updated data.
76
+ */
77
+ for (p = rw & -dcache_lsize; p < rw + len; p += dcache_lsize) {
78
+ asm volatile("dc\tcvau, %0" : : "r" (p) : "memory");
79
+ }
80
+ asm volatile("dsb\tish" : : : "memory");
81
+ }
82
+
83
+ /*
84
+ * If CTR_EL0.DIC is enabled, Instruction cache cleaning to the Point
85
+ * of Unification is not required for instruction to data coherence.
86
+ */
87
+ if (!(ctr_el0 & CTR_DIC)) {
88
+ for (p = rx & -icache_lsize; p < rx + len; p += icache_lsize) {
89
+ asm volatile("ic\tivau, %0" : : "r"(p) : "memory");
90
+ }
91
+ asm volatile ("dsb\tish" : : : "memory");
92
+ }
93
+
94
+ asm volatile("isb" : : : "memory");
95
+}
96
+#endif /* CONFIG_DARWIN */
97
+
98
#elif defined(__mips__)
99
100
#ifdef __OpenBSD__
101
--
102
2.25.1
103
104
Deleted patch
This value is constant across all thread-local copies of TCGContext,
so we might as well move it out of thread-local storage.

Use the correct function pointer type, and name the variable
tcg_qemu_tb_exec, which means that we are able to remove the
macro that does the casting.

Replace HAVE_TCG_QEMU_TB_EXEC with CONFIG_TCG_INTERPRETER,
as this is somewhat clearer in intent.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 9 ++++-----
 tcg/tcg.c | 9 ++++++++-
 tcg/tci.c | 4 ++--
 3 files changed, 14 insertions(+), 8 deletions(-)
21
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/include/tcg/tcg.h
24
+++ b/include/tcg/tcg.h
25
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
26
here, because there's too much arithmetic throughout that relies
27
on addition and subtraction working on bytes. Rely on the GCC
28
extension that allows arithmetic on void*. */
29
- void *code_gen_prologue;
30
void *code_gen_epilogue;
31
void *code_gen_buffer;
32
size_t code_gen_buffer_size;
33
@@ -XXX,XX +XXX,XX @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
34
#define TB_EXIT_IDXMAX 1
35
#define TB_EXIT_REQUESTED 3
36
37
-#ifdef HAVE_TCG_QEMU_TB_EXEC
38
-uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
39
+#ifdef CONFIG_TCG_INTERPRETER
40
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, void *tb_ptr);
41
#else
42
-# define tcg_qemu_tb_exec(env, tb_ptr) \
43
- ((uintptr_t (*)(void *, void *))tcg_ctx->code_gen_prologue)(env, tb_ptr)
44
+typedef uintptr_t tcg_prologue_fn(CPUArchState *env, void *tb_ptr);
45
+extern tcg_prologue_fn *tcg_qemu_tb_exec;
46
#endif
47
48
void tcg_register_jit(void *buf, size_t buf_size);
49
diff --git a/tcg/tcg.c b/tcg/tcg.c
50
index XXXXXXX..XXXXXXX 100644
51
--- a/tcg/tcg.c
52
+++ b/tcg/tcg.c
53
@@ -XXX,XX +XXX,XX @@ static TCGContext **tcg_ctxs;
54
static unsigned int n_tcg_ctxs;
55
TCGv_env cpu_env = 0;
56
57
+#ifndef CONFIG_TCG_INTERPRETER
58
+tcg_prologue_fn *tcg_qemu_tb_exec;
59
+#endif
60
+
61
struct tcg_region_tree {
62
QemuMutex lock;
63
GTree *tree;
64
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
65
s->code_ptr = buf0;
66
s->code_buf = buf0;
67
s->data_gen_ptr = NULL;
68
- s->code_gen_prologue = buf0;
69
+
70
+#ifndef CONFIG_TCG_INTERPRETER
71
+ tcg_qemu_tb_exec = (tcg_prologue_fn *)buf0;
72
+#endif
73
74
/* Compute a high-water mark, at which we voluntarily flush the buffer
75
and start over. The size here is arbitrary, significantly larger
76
diff --git a/tcg/tci.c b/tcg/tci.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tcg/tci.c
79
+++ b/tcg/tci.c
80
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
81
* One possible operation in the pseudo code is a call to binary code.
82
* Therefore, disable CFI checks in the interpreter function
83
*/
84
-QEMU_DISABLE_CFI
85
-uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
86
+uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, void *v_tb_ptr)
87
{
88
+ uint8_t *tb_ptr = v_tb_ptr;
89
tcg_target_ulong regs[TCG_TARGET_NB_REGS];
90
long tcg_temps[CPU_TEMP_BUF_NLONGS];
91
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
92
--
93
2.25.1
94
95
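For orientation, this is roughly how the tcg_qemu_tb_exec pointer introduced above ends up being used: the prologue is entered with the CPU state and the host address of a translated block, and the returned value packs the last executed TB together with a small exit index. The sketch is editorial and not from the series; run_one_tb() and TB_EXIT_SKETCH_MASK are invented names, and the low-bit encoding of the exit index is an assumption for illustration (only TB_EXIT_IDXMAX and TB_EXIT_REQUESTED appear in the patch context).

```c
#include <stdint.h>

#include "tcg/tcg.h"   /* tcg_prologue_fn, tcg_qemu_tb_exec, TB_EXIT_* */

/* Assumed for the sketch: low bits of the return value hold the exit index. */
#define TB_EXIT_SKETCH_MASK 3

/*
 * Run one translated block.  tb_code is the host address of the block's
 * generated code; env is the per-CPU architectural state that the
 * generated code operates on.
 */
static inline uintptr_t run_one_tb(CPUArchState *env, void *tb_code,
                                   int *exit_index)
{
    uintptr_t ret = tcg_qemu_tb_exec(env, tb_code);

    *exit_index = ret & TB_EXIT_SKETCH_MASK;      /* e.g. TB_EXIT_REQUESTED */
    return ret & ~(uintptr_t)TB_EXIT_SKETCH_MASK; /* last executed TB */
}
```

Note that with CONFIG_TCG_INTERPRETER the same name resolves to a real function rather than a pointer into the generated prologue, so the call site is unchanged either way.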
Deleted patch
This value is constant across all thread-local copies of TCGContext,
so we might as well move it out of thread-local storage.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg.h | 2 +-
 accel/tcg/tcg-runtime.c | 2 +-
 tcg/tcg.c | 3 ++-
 tcg/aarch64/tcg-target.c.inc | 4 ++--
 tcg/arm/tcg-target.c.inc | 2 +-
 tcg/i386/tcg-target.c.inc | 4 ++--
 tcg/mips/tcg-target.c.inc | 2 +-
 tcg/ppc/tcg-target.c.inc | 2 +-
 tcg/riscv/tcg-target.c.inc | 4 ++--
 tcg/s390/tcg-target.c.inc | 4 ++--
 tcg/sparc/tcg-target.c.inc | 2 +-
 11 files changed, 16 insertions(+), 15 deletions(-)
19
20
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/include/tcg/tcg.h
23
+++ b/include/tcg/tcg.h
24
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
25
here, because there's too much arithmetic throughout that relies
26
on addition and subtraction working on bytes. Rely on the GCC
27
extension that allows arithmetic on void*. */
28
- void *code_gen_epilogue;
void *code_gen_buffer;
size_t code_gen_buffer_size;
void *code_gen_ptr;
@@ -XXX,XX +XXX,XX @@ struct TCGContext {

extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
+extern void *tcg_code_gen_epilogue;
extern TCGv_env cpu_env;

static inline size_t temp_idx(TCGTemp *ts)
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -XXX,XX +XXX,XX @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)

tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
if (tb == NULL) {
- return tcg_ctx->code_gen_epilogue;
+ return tcg_code_gen_epilogue;
}
qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
"Chain %d: %p ["
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static int tcg_out_ldst_finalize(TCGContext *s);
static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
+void *tcg_code_gen_epilogue;

#ifndef CONFIG_TCG_INTERPRETER
tcg_prologue_fn *tcg_qemu_tb_exec;
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)

/* Assert that goto_ptr is implemented completely. */
if (TCG_TARGET_HAS_goto_ptr) {
- tcg_debug_assert(s->code_gen_epilogue != NULL);
+ tcg_debug_assert(tcg_code_gen_epilogue != NULL);
}
}

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_exit_tb:
/* Reuse the zeroing that exists for goto_ptr. */
if (a0 == 0) {
- tcg_out_goto_long(s, s->code_gen_epilogue);
+ tcg_out_goto_long(s, tcg_code_gen_epilogue);
} else {
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
tcg_out_goto_long(s, tb_ret_addr);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);

/* TB epilogue */
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
tcg_out_epilogue(s);
}
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_exit_tb:
/* Reuse the zeroing that exists for goto_ptr. */
if (a0 == 0) {
- tcg_out_jmp(s, s->code_gen_epilogue);
+ tcg_out_jmp(s, tcg_code_gen_epilogue);
} else {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
tcg_out_jmp(s, tb_ret_addr);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);

/* TB epilogue */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);

/* TB epilogue */
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out32(s, BCCTR | BO_ALWAYS);

/* Epilogue */
- s->code_gen_epilogue = tb_ret_addr = s->code_ptr;
+ tcg_code_gen_epilogue = tb_ret_addr = s->code_ptr;

tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_exit_tb:
/* Reuse the zeroing that exists for goto_ptr. */
if (a0 == 0) {
- tcg_out_call_int(s, s->code_gen_epilogue, true);
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
} else {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
tcg_out_call_int(s, tb_ret_addr, true);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

/* Return path for goto_ptr. Set return value to 0 */
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

/* TB epilogue */
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
/* Reuse the zeroing that exists for goto_ptr. */
a0 = args[0];
if (a0 == 0) {
- tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
+ tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
} else {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

/* TB epilogue */
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.c.inc
+++ b/tcg/sparc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_nop(s);

/* Epilogue for goto_ptr. */
- s->code_gen_epilogue = s->code_ptr;
+ tcg_code_gen_epilogue = s->code_ptr;
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
/* delay slot */
tcg_out_movi_imm13(s, TCG_REG_O0, 0);
--
2.25.1
Create a function to determine if a pointer is within the buffer.
2
1
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg.h | 11 +++++++++++
7
accel/tcg/translate-all.c | 26 ++++++++------------------
8
2 files changed, 19 insertions(+), 18 deletions(-)
9
10
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg.h
13
+++ b/include/tcg/tcg.h
14
@@ -XXX,XX +XXX,XX @@ extern __thread TCGContext *tcg_ctx;
15
extern void *tcg_code_gen_epilogue;
16
extern TCGv_env cpu_env;
17
18
+static inline bool in_code_gen_buffer(const void *p)
19
+{
20
+ const TCGContext *s = &tcg_init_ctx;
21
+ /*
22
+ * Much like it is valid to have a pointer to the byte past the
23
+ * end of an array (so long as you don't dereference it), allow
24
+ * a pointer to the byte past the end of the code gen buffer.
25
+ */
26
+ return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
27
+}
28
+
29
static inline size_t temp_idx(TCGTemp *ts)
30
{
31
ptrdiff_t n = ts - tcg_ctx->temps;
32
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/accel/tcg/translate-all.c
35
+++ b/accel/tcg/translate-all.c
36
@@ -XXX,XX +XXX,XX @@ void tb_destroy(TranslationBlock *tb)
37
38
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
39
{
40
- TranslationBlock *tb;
41
- bool r = false;
42
- uintptr_t check_offset;
43
-
44
- /* The host_pc has to be in the region of current code buffer. If
45
- * it is not we will not be able to resolve it here. The two cases
46
- * where host_pc will not be correct are:
47
+ /*
48
+ * The host_pc has to be in the region of the code buffer.
49
+ * If it is not we will not be able to resolve it here.
50
+ * The two cases where host_pc will not be correct are:
51
*
52
* - fault during translation (instruction fetch)
53
* - fault from helper (not using GETPC() macro)
54
*
55
* Either way we need return early as we can't resolve it here.
56
- *
57
- * We are using unsigned arithmetic so if host_pc <
58
- * tcg_init_ctx.code_gen_buffer check_offset will wrap to way
59
- * above the code_gen_buffer_size
60
*/
61
- check_offset = host_pc - (uintptr_t) tcg_init_ctx.code_gen_buffer;
62
-
63
- if (check_offset < tcg_init_ctx.code_gen_buffer_size) {
64
- tb = tcg_tb_lookup(host_pc);
65
+ if (in_code_gen_buffer((const void *)host_pc)) {
66
+ TranslationBlock *tb = tcg_tb_lookup(host_pc);
67
if (tb) {
68
cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
69
if (tb_cflags(tb) & CF_NOCACHE) {
70
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
71
tcg_tb_remove(tb);
72
tb_destroy(tb);
73
}
74
- r = true;
75
+ return true;
76
}
77
}
78
-
79
- return r;
80
+ return false;
81
}
82
83
static void page_init(void)
84
--
85
2.25.1
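
The single unsigned comparison in in_code_gen_buffer() covers both bounds at
once: if p is below code_gen_buffer the subtraction wraps around to a huge
value and the <= test fails, and the one-past-the-end pointer is accepted on
purpose. A minimal standalone sketch of the same idiom follows; the buffer
variables are illustrative stand-ins, not QEMU names, and it relies on the
same flat-address-space assumption as the original:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative stand-ins for tcg_init_ctx.code_gen_buffer{,_size}. */
    static const char *buf_base;
    static size_t buf_size;

    static bool in_buffer(const void *p)
    {
        /* One unsigned compare checks both ends; offset == buf_size
         * (one past the end) is deliberately accepted. */
        return (size_t)((const char *)p - buf_base) <= buf_size;
    }
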
Add two helper functions, using a global variable to hold
2
the displacement. The displacement is currently always 0,
3
so no change in behaviour.
4
1
5
Begin using the functions in tcg common code only.
6
7
Reviewed-by: Joelle van Dyne <j@getutm.app>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
accel/tcg/tcg-runtime.h | 2 +-
11
include/disas/disas.h | 2 +-
12
include/exec/exec-all.h | 2 +-
13
include/exec/log.h | 2 +-
14
include/tcg/tcg.h | 26 ++++++++++++++----
15
accel/tcg/cpu-exec.c | 2 +-
16
accel/tcg/tcg-runtime.c | 2 +-
17
accel/tcg/translate-all.c | 33 +++++++++++------------
18
disas.c | 4 ++-
19
tcg/tcg.c | 56 ++++++++++++++++++++++++++++++++++-----
20
tcg/tci.c | 6 +++--
21
tcg/tcg-pool.c.inc | 6 ++++-
22
accel/tcg/trace-events | 2 +-
23
13 files changed, 105 insertions(+), 40 deletions(-)
24
25
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/accel/tcg/tcg-runtime.h
28
+++ b/accel/tcg/tcg-runtime.h
29
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64)
30
DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32)
31
DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64)
32
33
-DEF_HELPER_FLAGS_1(lookup_tb_ptr, TCG_CALL_NO_WG_SE, ptr, env)
34
+DEF_HELPER_FLAGS_1(lookup_tb_ptr, TCG_CALL_NO_WG_SE, cptr, env)
35
36
DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
37
38
diff --git a/include/disas/disas.h b/include/disas/disas.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/include/disas/disas.h
41
+++ b/include/disas/disas.h
42
@@ -XXX,XX +XXX,XX @@
43
#include "cpu.h"
44
45
/* Disassemble this for me please... (debugging). */
46
-void disas(FILE *out, void *code, unsigned long size);
47
+void disas(FILE *out, const void *code, unsigned long size);
48
void target_disas(FILE *out, CPUState *cpu, target_ulong code,
49
target_ulong size);
50
51
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/include/exec/exec-all.h
54
+++ b/include/exec/exec-all.h
55
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
56
* Note: the address of search data can be obtained by adding @size to @ptr.
57
*/
58
struct tb_tc {
59
- void *ptr; /* pointer to the translated code */
60
+ const void *ptr; /* pointer to the translated code */
61
size_t size;
62
};
63
64
diff --git a/include/exec/log.h b/include/exec/log.h
65
index XXXXXXX..XXXXXXX 100644
66
--- a/include/exec/log.h
67
+++ b/include/exec/log.h
68
@@ -XXX,XX +XXX,XX @@ static inline void log_target_disas(CPUState *cpu, target_ulong start,
69
rcu_read_unlock();
70
}
71
72
-static inline void log_disas(void *code, unsigned long size)
73
+static inline void log_disas(const void *code, unsigned long size)
74
{
75
QemuLogFile *logfile;
76
rcu_read_lock();
77
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
78
index XXXXXXX..XXXXXXX 100644
79
--- a/include/tcg/tcg.h
80
+++ b/include/tcg/tcg.h
81
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
82
extern TCGContext tcg_init_ctx;
83
extern __thread TCGContext *tcg_ctx;
84
extern void *tcg_code_gen_epilogue;
85
+extern uintptr_t tcg_splitwx_diff;
86
extern TCGv_env cpu_env;
87
88
static inline bool in_code_gen_buffer(const void *p)
89
@@ -XXX,XX +XXX,XX @@ static inline bool in_code_gen_buffer(const void *p)
90
return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
91
}
92
93
+#ifdef CONFIG_DEBUG_TCG
94
+const void *tcg_splitwx_to_rx(void *rw);
95
+void *tcg_splitwx_to_rw(const void *rx);
96
+#else
97
+static inline const void *tcg_splitwx_to_rx(void *rw)
98
+{
99
+ return rw ? rw + tcg_splitwx_diff : NULL;
100
+}
101
+
102
+static inline void *tcg_splitwx_to_rw(const void *rx)
103
+{
104
+ return rx ? (void *)rx - tcg_splitwx_diff : NULL;
105
+}
106
+#endif
107
+
108
static inline size_t temp_idx(TCGTemp *ts)
109
{
110
ptrdiff_t n = ts - tcg_ctx->temps;
111
@@ -XXX,XX +XXX,XX @@ static inline TCGLabel *arg_label(TCGArg i)
112
* correct result.
113
*/
114
115
-static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
116
+static inline ptrdiff_t tcg_ptr_byte_diff(const void *a, const void *b)
117
{
118
return a - b;
119
}
120
@@ -XXX,XX +XXX,XX @@ static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
121
* to the destination address.
122
*/
123
124
-static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
125
+static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
126
{
127
- return tcg_ptr_byte_diff(target, s->code_ptr);
128
+ return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
129
}
130
131
/**
132
@@ -XXX,XX +XXX,XX @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
133
#define TB_EXIT_REQUESTED 3
134
135
#ifdef CONFIG_TCG_INTERPRETER
136
-uintptr_t tcg_qemu_tb_exec(CPUArchState *env, void *tb_ptr);
137
+uintptr_t tcg_qemu_tb_exec(CPUArchState *env, const void *tb_ptr);
138
#else
139
-typedef uintptr_t tcg_prologue_fn(CPUArchState *env, void *tb_ptr);
140
+typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
141
extern tcg_prologue_fn *tcg_qemu_tb_exec;
142
#endif
143
144
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
145
index XXXXXXX..XXXXXXX 100644
146
--- a/accel/tcg/cpu-exec.c
147
+++ b/accel/tcg/cpu-exec.c
148
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
149
uintptr_t ret;
150
TranslationBlock *last_tb;
151
int tb_exit;
152
- uint8_t *tb_ptr = itb->tc.ptr;
153
+ const void *tb_ptr = itb->tc.ptr;
154
155
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
156
"Trace %d: %p ["
157
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
158
index XXXXXXX..XXXXXXX 100644
159
--- a/accel/tcg/tcg-runtime.c
160
+++ b/accel/tcg/tcg-runtime.c
161
@@ -XXX,XX +XXX,XX @@ uint64_t HELPER(ctpop_i64)(uint64_t arg)
162
return ctpop64(arg);
163
}
164
165
-void *HELPER(lookup_tb_ptr)(CPUArchState *env)
166
+const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
167
{
168
CPUState *cpu = env_cpu(env);
169
TranslationBlock *tb;
170
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/accel/tcg/translate-all.c
173
+++ b/accel/tcg/translate-all.c
174
@@ -XXX,XX +XXX,XX @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val)
175
176
/* Decode a signed leb128 sequence at *PP; increment *PP past the
177
decoded value. Return the decoded value. */
178
-static target_long decode_sleb128(uint8_t **pp)
179
+static target_long decode_sleb128(const uint8_t **pp)
180
{
181
- uint8_t *p = *pp;
182
+ const uint8_t *p = *pp;
183
target_long val = 0;
184
int byte, shift = 0;
185
186
@@ -XXX,XX +XXX,XX @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
187
target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
188
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
189
CPUArchState *env = cpu->env_ptr;
190
- uint8_t *p = tb->tc.ptr + tb->tc.size;
191
+ const uint8_t *p = tb->tc.ptr + tb->tc.size;
192
int i, j, num_insns = tb->icount;
193
#ifdef CONFIG_PROFILER
194
TCGProfile *prof = &tcg_ctx->prof;
195
@@ -XXX,XX +XXX,XX @@ void tb_destroy(TranslationBlock *tb)
196
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
197
{
198
/*
199
- * The host_pc has to be in the region of the code buffer.
200
+ * The host_pc has to be in the rx region of the code buffer.
201
* If it is not we will not be able to resolve it here.
202
* The two cases where host_pc will not be correct are:
203
*
204
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
205
*
206
* Either way we need return early as we can't resolve it here.
207
*/
208
- if (in_code_gen_buffer((const void *)host_pc)) {
209
+ if (in_code_gen_buffer((const void *)(host_pc - tcg_splitwx_diff))) {
210
TranslationBlock *tb = tcg_tb_lookup(host_pc);
211
if (tb) {
212
cpu_restore_state_from_tb(cpu, tb, host_pc, will_exit);
213
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
214
}
215
216
gen_code_buf = tcg_ctx->code_gen_ptr;
217
- tb->tc.ptr = gen_code_buf;
218
+ tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
219
tb->pc = pc;
220
tb->cs_base = cs_base;
221
tb->flags = flags;
222
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
223
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
224
qemu_log_in_addr_range(tb->pc)) {
225
FILE *logfile = qemu_log_lock();
226
- int code_size, data_size = 0;
227
+ int code_size, data_size;
228
+ const tcg_target_ulong *rx_data_gen_ptr;
229
size_t chunk_start;
230
int insn = 0;
231
232
if (tcg_ctx->data_gen_ptr) {
233
- code_size = tcg_ctx->data_gen_ptr - tb->tc.ptr;
234
+ rx_data_gen_ptr = tcg_splitwx_to_rx(tcg_ctx->data_gen_ptr);
235
+ code_size = (const void *)rx_data_gen_ptr - tb->tc.ptr;
236
data_size = gen_code_size - code_size;
237
} else {
238
+ rx_data_gen_ptr = 0;
239
code_size = gen_code_size;
240
+ data_size = 0;
241
}
242
243
/* Dump header and the first instruction */
244
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
245
if (data_size) {
246
int i;
247
qemu_log(" data: [size=%d]\n", data_size);
248
- for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
249
- if (sizeof(tcg_target_ulong) == 8) {
250
- qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
251
- (uintptr_t)tcg_ctx->data_gen_ptr + i,
252
- *(uint64_t *)(tcg_ctx->data_gen_ptr + i));
253
- } else {
254
- qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
255
- (uintptr_t)tcg_ctx->data_gen_ptr + i,
256
- *(uint32_t *)(tcg_ctx->data_gen_ptr + i));
257
- }
258
+ for (i = 0; i < data_size / sizeof(tcg_target_ulong); i++) {
259
+ qemu_log("0x%08" PRIxPTR ": .quad 0x%" TCG_PRIlx "\n",
260
+ (uintptr_t)&rx_data_gen_ptr[i], rx_data_gen_ptr[i]);
261
}
262
}
263
qemu_log("\n");
264
diff --git a/disas.c b/disas.c
265
index XXXXXXX..XXXXXXX 100644
266
--- a/disas.c
267
+++ b/disas.c
268
@@ -XXX,XX +XXX,XX @@ char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size)
269
}
270
271
/* Disassemble this for me please... (debugging). */
272
-void disas(FILE *out, void *code, unsigned long size)
273
+void disas(FILE *out, const void *ccode, unsigned long size)
274
{
275
+ /* TODO: Push constness through the disas backends. */
276
+ void *code = (void *)ccode;
277
uintptr_t pc;
278
int count;
279
CPUDebug s;
280
diff --git a/tcg/tcg.c b/tcg/tcg.c
281
index XXXXXXX..XXXXXXX 100644
282
--- a/tcg/tcg.c
283
+++ b/tcg/tcg.c
284
@@ -XXX,XX +XXX,XX @@ static TCGContext **tcg_ctxs;
285
static unsigned int n_tcg_ctxs;
286
TCGv_env cpu_env = 0;
287
void *tcg_code_gen_epilogue;
288
+uintptr_t tcg_splitwx_diff;
289
290
#ifndef CONFIG_TCG_INTERPRETER
291
tcg_prologue_fn *tcg_qemu_tb_exec;
292
@@ -XXX,XX +XXX,XX @@ static void tcg_region_trees_init(void)
293
}
294
}
295
296
-static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
297
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp)
298
{
299
+ void *p = tcg_splitwx_to_rw(cp);
300
size_t region_idx;
301
302
if (p < region.start_aligned) {
303
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
304
size_t region_size;
305
size_t n_regions;
306
size_t i;
307
+ uintptr_t splitwx_diff;
308
309
n_regions = tcg_n_regions();
310
311
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
312
region.end -= page_size;
313
314
/* set guard pages */
315
+ splitwx_diff = tcg_splitwx_diff;
316
for (i = 0; i < region.n; i++) {
317
void *start, *end;
318
int rc;
319
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
320
tcg_region_bounds(i, &start, &end);
321
rc = qemu_mprotect_none(end, page_size);
322
g_assert(!rc);
323
+ if (splitwx_diff) {
324
+ rc = qemu_mprotect_none(end + splitwx_diff, page_size);
325
+ g_assert(!rc);
326
+ }
327
}
328
329
tcg_region_trees_init();
330
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
331
#endif
332
}
333
334
+#ifdef CONFIG_DEBUG_TCG
335
+const void *tcg_splitwx_to_rx(void *rw)
336
+{
337
+ /* Pass NULL pointers unchanged. */
338
+ if (rw) {
339
+ g_assert(in_code_gen_buffer(rw));
340
+ rw += tcg_splitwx_diff;
341
+ }
342
+ return rw;
343
+}
344
+
345
+void *tcg_splitwx_to_rw(const void *rx)
346
+{
347
+ /* Pass NULL pointers unchanged. */
348
+ if (rx) {
349
+ rx -= tcg_splitwx_diff;
350
+ /* Assert that we end with a pointer in the rw region. */
351
+ g_assert(in_code_gen_buffer(rx));
352
+ }
353
+ return (void *)rx;
354
+}
355
+#endif /* CONFIG_DEBUG_TCG */
356
+
357
static void alloc_tcg_plugin_context(TCGContext *s)
358
{
359
#ifdef CONFIG_PLUGIN
360
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
361
s->code_buf = buf0;
362
s->data_gen_ptr = NULL;
363
364
+ /*
365
+ * The region trees are not yet configured, but tcg_splitwx_to_rx
366
+ * needs the bounds for an assert.
367
+ */
368
+ region.start = buf0;
369
+ region.end = buf0 + total_size;
370
+
371
#ifndef CONFIG_TCG_INTERPRETER
372
- tcg_qemu_tb_exec = (tcg_prologue_fn *)buf0;
373
+ tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(buf0);
374
#endif
375
376
/* Compute a high-water mark, at which we voluntarily flush the buffer
377
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
378
379
buf1 = s->code_ptr;
380
#ifndef CONFIG_TCG_INTERPRETER
381
- flush_idcache_range((uintptr_t)buf0, (uintptr_t)buf0,
382
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(buf0), (uintptr_t)buf0,
383
tcg_ptr_byte_diff(buf1, buf0));
384
#endif
385
386
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
387
388
tcg_reg_alloc_start(s);
389
390
- s->code_buf = tb->tc.ptr;
391
- s->code_ptr = tb->tc.ptr;
392
+ /*
393
+ * Reset the buffer pointers when restarting after overflow.
394
+ * TODO: Move this into translate-all.c with the rest of the
395
+ * buffer management. Having only this done here is confusing.
396
+ */
397
+ s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
398
+ s->code_ptr = s->code_buf;
399
400
#ifdef TCG_TARGET_NEED_LDST_LABELS
401
QSIMPLEQ_INIT(&s->ldst_labels);
402
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
403
404
#ifndef CONFIG_TCG_INTERPRETER
405
/* flush instruction cache */
406
- flush_idcache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_buf,
407
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
408
+ (uintptr_t)s->code_buf,
409
tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
410
#endif
411
412
diff --git a/tcg/tci.c b/tcg/tci.c
413
index XXXXXXX..XXXXXXX 100644
414
--- a/tcg/tci.c
415
+++ b/tcg/tci.c
416
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
417
* One possible operation in the pseudo code is a call to binary code.
418
* Therefore, disable CFI checks in the interpreter function
419
*/
420
-uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, void *v_tb_ptr)
421
+uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
422
+ const void *v_tb_ptr)
423
{
424
- uint8_t *tb_ptr = v_tb_ptr;
425
+ /* TODO: Propagate const through this file. */
426
+ uint8_t *tb_ptr = (uint8_t *)v_tb_ptr;
427
tcg_target_ulong regs[TCG_TARGET_NB_REGS];
428
long tcg_temps[CPU_TEMP_BUF_NLONGS];
429
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
430
diff --git a/tcg/tcg-pool.c.inc b/tcg/tcg-pool.c.inc
431
index XXXXXXX..XXXXXXX 100644
432
--- a/tcg/tcg-pool.c.inc
433
+++ b/tcg/tcg-pool.c.inc
434
@@ -XXX,XX +XXX,XX @@ static int tcg_out_pool_finalize(TCGContext *s)
435
436
for (; p != NULL; p = p->next) {
437
size_t size = sizeof(tcg_target_ulong) * p->nlong;
438
+ uintptr_t value;
439
+
440
if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
441
if (unlikely(a > s->code_gen_highwater)) {
442
return -1;
443
@@ -XXX,XX +XXX,XX @@ static int tcg_out_pool_finalize(TCGContext *s)
444
a += size;
445
l = p;
446
}
447
- if (!patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend)) {
448
+
449
+ value = (uintptr_t)tcg_splitwx_to_rx(a) - size;
450
+ if (!patch_reloc(p->label, p->rtype, value, p->addend)) {
451
return -2;
452
}
453
}
454
diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events
455
index XXXXXXX..XXXXXXX 100644
456
--- a/accel/tcg/trace-events
457
+++ b/accel/tcg/trace-events
458
@@ -XXX,XX +XXX,XX @@ exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
459
exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=0x%x"
460
461
# translate-all.c
462
-translate_block(void *tb, uintptr_t pc, uint8_t *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
463
+translate_block(void *tb, uintptr_t pc, const void *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
464
--
465
2.25.1
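
The helpers above reduce to pointer arithmetic with a single global
displacement: the same buffer is mapped twice, and tcg_splitwx_diff is the
constant rx - rw offset between the writable and executable mappings; with
the displacement still 0, both conversions are the identity. A rough sketch
of that relationship, with simplified names and no debug assertions:

    #include <stdint.h>
    #include <stddef.h>

    /* Constant rx - rw displacement between the two mappings of the
     * code buffer (0 when split-wx is not in use). */
    static uintptr_t splitwx_diff;

    static const void *to_rx(void *rw)
    {
        return rw ? (const char *)rw + splitwx_diff : NULL;
    }

    static void *to_rw(const void *rx)
    {
        return rx ? (void *)((const char *)rx - splitwx_diff) : NULL;
    }
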
Change TCGLabel.u.value_ptr to const, and initialize it with
2
tcg_splitwx_to_rx. Propagate const through tcg/host/ only
3
as far as needed to avoid errors from the value_ptr change.
4
1
5
Reviewed-by: Joelle van Dyne <j@getutm.app>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
include/tcg/tcg.h | 2 +-
9
tcg/tcg.c | 2 +-
10
tcg/aarch64/tcg-target.c.inc | 2 +-
11
tcg/arm/tcg-target.c.inc | 2 +-
12
tcg/mips/tcg-target.c.inc | 5 +++--
13
tcg/ppc/tcg-target.c.inc | 4 ++--
14
tcg/s390/tcg-target.c.inc | 2 +-
15
7 files changed, 10 insertions(+), 9 deletions(-)
16
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
20
+++ b/include/tcg/tcg.h
21
@@ -XXX,XX +XXX,XX @@ struct TCGLabel {
22
unsigned refs : 16;
23
union {
24
uintptr_t value;
25
- tcg_insn_unit *value_ptr;
26
+ const tcg_insn_unit *value_ptr;
27
} u;
28
QSIMPLEQ_HEAD(, TCGRelocation) relocs;
29
QSIMPLEQ_ENTRY(TCGLabel) next;
30
diff --git a/tcg/tcg.c b/tcg/tcg.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/tcg.c
33
+++ b/tcg/tcg.c
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
35
{
36
tcg_debug_assert(!l->has_value);
37
l->has_value = 1;
38
- l->u.value_ptr = ptr;
39
+ l->u.value_ptr = tcg_splitwx_to_rx(ptr);
40
}
41
42
TCGLabel *gen_new_label(void)
43
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/aarch64/tcg-target.c.inc
46
+++ b/tcg/aarch64/tcg-target.c.inc
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
48
}
49
}
50
51
-static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
52
+static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
53
{
54
ptrdiff_t offset = target - s->code_ptr;
55
tcg_debug_assert(offset == sextract64(offset, 0, 26));
56
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
57
index XXXXXXX..XXXXXXX 100644
58
--- a/tcg/arm/tcg-target.c.inc
59
+++ b/tcg/arm/tcg-target.c.inc
60
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_st8(TCGContext *s, int cond,
61
* with the code buffer limited to 16MB we wouldn't need the long case.
62
* But we also use it for the tail-call to the qemu_ld/st helpers, which does.
63
*/
64
-static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
65
+static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
66
{
67
intptr_t addri = (intptr_t)addr;
68
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
69
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/mips/tcg-target.c.inc
72
+++ b/tcg/mips/tcg-target.c.inc
73
@@ -XXX,XX +XXX,XX @@ static tcg_insn_unit *bswap32_addr;
74
static tcg_insn_unit *bswap32u_addr;
75
static tcg_insn_unit *bswap64_addr;
76
77
-static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
78
+static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc,
79
+ const tcg_insn_unit *target)
80
{
81
/* Let the compiler perform the right-shift as part of the arithmetic. */
82
ptrdiff_t disp = target - (pc + 1);
83
@@ -XXX,XX +XXX,XX @@ static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
84
return disp & 0xffff;
85
}
86
87
-static inline void reloc_pc16(tcg_insn_unit *pc, tcg_insn_unit *target)
88
+static inline void reloc_pc16(tcg_insn_unit *pc, const tcg_insn_unit *target)
89
{
90
*pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
91
}
92
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
93
index XXXXXXX..XXXXXXX 100644
94
--- a/tcg/ppc/tcg-target.c.inc
95
+++ b/tcg/ppc/tcg-target.c.inc
96
@@ -XXX,XX +XXX,XX @@ static inline bool in_range_b(tcg_target_long target)
97
return target == sextract64(target, 0, 26);
98
}
99
100
-static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
101
+static uint32_t reloc_pc24_val(tcg_insn_unit *pc, const tcg_insn_unit *target)
102
{
103
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
104
tcg_debug_assert(in_range_b(disp));
105
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
106
return false;
107
}
108
109
-static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
110
+static uint16_t reloc_pc14_val(tcg_insn_unit *pc, const tcg_insn_unit *target)
111
{
112
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
113
tcg_debug_assert(disp == (int16_t) disp);
114
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
115
index XXXXXXX..XXXXXXX 100644
116
--- a/tcg/s390/tcg-target.c.inc
117
+++ b/tcg/s390/tcg-target.c.inc
118
@@ -XXX,XX +XXX,XX @@ static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
119
tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
120
}
121
122
-static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
123
+static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
124
{
125
ptrdiff_t off = dest - s->code_ptr;
126
if (off == (int16_t)off) {
127
--
128
2.25.1
We must change all targets at once, since all must match
2
the declaration in tcg.c.
3
1
4
Reviewed-by: Joelle van Dyne <j@getutm.app>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/tcg.c | 2 +-
9
tcg/aarch64/tcg-target.c.inc | 2 +-
10
tcg/arm/tcg-target.c.inc | 2 +-
11
tcg/i386/tcg-target.c.inc | 4 ++--
12
tcg/mips/tcg-target.c.inc | 6 +++---
13
tcg/ppc/tcg-target.c.inc | 8 ++++----
14
tcg/riscv/tcg-target.c.inc | 6 +++---
15
tcg/s390/tcg-target.c.inc | 2 +-
16
tcg/sparc/tcg-target.c.inc | 4 ++--
17
tcg/tci/tcg-target.c.inc | 2 +-
18
10 files changed, 19 insertions(+), 19 deletions(-)
19
20
diff --git a/tcg/tcg.c b/tcg/tcg.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/tcg.c
23
+++ b/tcg/tcg.c
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
25
intptr_t arg2);
26
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
27
TCGReg base, intptr_t ofs);
28
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
29
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target);
30
static int tcg_target_const_match(tcg_target_long val, TCGType type,
31
const TCGArgConstraint *arg_ct);
32
#ifdef TCG_TARGET_NEED_LDST_LABELS
33
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
34
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/aarch64/tcg-target.c.inc
36
+++ b/tcg/aarch64/tcg-target.c.inc
37
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
38
tcg_out_insn(s, 3207, BLR, reg);
39
}
40
41
-static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
42
+static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
43
{
44
ptrdiff_t offset = target - s->code_ptr;
45
if (offset == sextract64(offset, 0, 26)) {
46
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/tcg/arm/tcg-target.c.inc
49
+++ b/tcg/arm/tcg-target.c.inc
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, int cond, const tcg_insn_unit *addr)
51
52
/* The call case is mostly used for helpers - so it's not unreasonable
53
* for them to be beyond branch range */
54
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
55
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
56
{
57
intptr_t addri = (intptr_t)addr;
58
ptrdiff_t disp = tcg_pcrel_diff(s, addr);
59
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
60
index XXXXXXX..XXXXXXX 100644
61
--- a/tcg/i386/tcg-target.c.inc
62
+++ b/tcg/i386/tcg-target.c.inc
63
@@ -XXX,XX +XXX,XX @@ static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
64
}
65
}
66
67
-static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
68
+static void tcg_out_branch(TCGContext *s, int call, const tcg_insn_unit *dest)
69
{
70
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
71
72
@@ -XXX,XX +XXX,XX @@ static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
73
}
74
}
75
76
-static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
77
+static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
78
{
79
tcg_out_branch(s, 1, dest);
80
}
81
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
82
index XXXXXXX..XXXXXXX 100644
83
--- a/tcg/mips/tcg-target.c.inc
84
+++ b/tcg/mips/tcg-target.c.inc
85
@@ -XXX,XX +XXX,XX @@ static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2,
86
* Type jump.
87
* Returns true if the branch was in range and the insn was emitted.
88
*/
89
-static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target)
90
+static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, const void *target)
91
{
92
uintptr_t dest = (uintptr_t)target;
93
uintptr_t from = (uintptr_t)s->code_ptr + 4;
94
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
95
}
96
}
97
98
-static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
99
+static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
100
{
101
/* Note that the ABI requires the called function's address to be
102
loaded into T9, even if a direct branch is in range. */
103
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
104
}
105
}
106
107
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
108
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
109
{
110
tcg_out_call_int(s, arg, false);
111
tcg_out_nop(s);
112
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/ppc/tcg-target.c.inc
115
+++ b/tcg/ppc/tcg-target.c.inc
116
@@ -XXX,XX +XXX,XX @@ static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
117
tcg_out_zori32(s, dst, src, c, XORI, XORIS);
118
}
119
120
-static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
121
+static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
122
{
123
ptrdiff_t disp = tcg_pcrel_diff(s, target);
124
if (in_range_b(disp)) {
125
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
126
}
127
}
128
129
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
130
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
131
{
132
#ifdef _CALL_AIX
133
/* Look through the descriptor. If the branch is in range, and we
134
don't have to spend too much effort on building the toc. */
135
- void *tgt = ((void **)target)[0];
136
- uintptr_t toc = ((uintptr_t *)target)[1];
137
+ const void *tgt = ((const void * const *)target)[0];
138
+ uintptr_t toc = ((const uintptr_t *)target)[1];
139
intptr_t diff = tcg_pcrel_diff(s, tgt);
140
141
if (in_range_b(diff) && toc == (uint32_t)toc) {
142
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
143
index XXXXXXX..XXXXXXX 100644
144
--- a/tcg/riscv/tcg-target.c.inc
145
+++ b/tcg/riscv/tcg-target.c.inc
146
@@ -XXX,XX +XXX,XX @@ static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
147
return false;
148
}
149
150
-static bool reloc_call(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
151
+static bool reloc_call(tcg_insn_unit *code_ptr, const tcg_insn_unit *target)
152
{
153
intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
154
int32_t lo = sextreg(offset, 0, 12);
155
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
156
tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset);
157
}
158
159
-static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
160
+static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
161
{
162
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
163
ptrdiff_t offset = tcg_pcrel_diff(s, arg);
164
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail)
165
}
166
}
167
168
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
169
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
170
{
171
tcg_out_call_int(s, arg, false);
172
}
173
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
174
index XXXXXXX..XXXXXXX 100644
175
--- a/tcg/s390/tcg-target.c.inc
176
+++ b/tcg/s390/tcg-target.c.inc
177
@@ -XXX,XX +XXX,XX @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
178
tgen_branch(s, cc, l);
179
}
180
181
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
182
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
183
{
184
ptrdiff_t off = dest - s->code_ptr;
185
if (off == (int32_t)off) {
186
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
187
index XXXXXXX..XXXXXXX 100644
188
--- a/tcg/sparc/tcg-target.c.inc
189
+++ b/tcg/sparc/tcg-target.c.inc
190
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
191
tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
192
}
193
194
-static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest,
195
+static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
196
bool in_prologue)
197
{
198
ptrdiff_t disp = tcg_pcrel_diff(s, dest);
199
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest,
200
}
201
}
202
203
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
204
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
205
{
206
tcg_out_call_nodelay(s, dest, false);
207
tcg_out_nop(s);
208
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
209
index XXXXXXX..XXXXXXX 100644
210
--- a/tcg/tci/tcg-target.c.inc
211
+++ b/tcg/tci/tcg-target.c.inc
212
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
213
old_code_ptr[1] = s->code_ptr - old_code_ptr;
214
}
215
216
-static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
217
+static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
218
{
219
uint8_t *old_code_ptr = s->code_ptr;
220
tcg_out_op_t(s, INDEX_op_call);
221
--
222
2.25.1
Simplify the arguments to always use s->code_ptr instead of
taking it as an argument. That makes it easy to ensure that
the value_ptr is always the rx version.
4
1
5
Reviewed-by: Joelle van Dyne <j@getutm.app>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/tcg.c | 6 +++---
10
tcg/i386/tcg-target.c.inc | 10 +++++-----
11
2 files changed, 8 insertions(+), 8 deletions(-)
12
13
diff --git a/tcg/tcg.c b/tcg/tcg.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/tcg.c
16
+++ b/tcg/tcg.c
17
@@ -XXX,XX +XXX,XX @@ static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
18
QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
19
}
20
21
-static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
22
+static void tcg_out_label(TCGContext *s, TCGLabel *l)
23
{
24
tcg_debug_assert(!l->has_value);
25
l->has_value = 1;
26
- l->u.value_ptr = tcg_splitwx_to_rx(ptr);
27
+ l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
28
}
29
30
TCGLabel *gen_new_label(void)
31
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
32
break;
33
case INDEX_op_set_label:
34
tcg_reg_alloc_bb_end(s, s->reserved_regs);
35
- tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
36
+ tcg_out_label(s, arg_label(op->args[0]));
37
break;
38
case INDEX_op_call:
39
tcg_reg_alloc_call(s, op);
40
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
41
index XXXXXXX..XXXXXXX 100644
42
--- a/tcg/i386/tcg-target.c.inc
43
+++ b/tcg/i386/tcg-target.c.inc
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
45
default:
46
tcg_abort();
47
}
48
- tcg_out_label(s, label_next, s->code_ptr);
49
+ tcg_out_label(s, label_next);
50
}
51
#endif
52
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
54
55
tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
56
tcg_out_jxx(s, JCC_JMP, label_over, 1);
57
- tcg_out_label(s, label_true, s->code_ptr);
58
+ tcg_out_label(s, label_true);
59
60
tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
61
- tcg_out_label(s, label_over, s->code_ptr);
62
+ tcg_out_label(s, label_over);
63
} else {
64
/* When the destination does not overlap one of the arguments,
65
clear the destination first, jump if cond false, and emit an
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
67
tcg_out_brcond2(s, new_args, const_args+1, 1);
68
69
tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
70
- tcg_out_label(s, label_over, s->code_ptr);
71
+ tcg_out_label(s, label_over);
72
}
73
}
74
#endif
75
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
76
TCGLabel *over = gen_new_label();
77
tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
78
tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
79
- tcg_out_label(s, over, s->code_ptr);
80
+ tcg_out_label(s, over);
81
}
82
}
83
84
--
85
2.25.1
We must change all targets at once, since all must match
2
the declaration in tcg.c.
3
1
4
Reviewed-by: Joelle van Dyne <j@getutm.app>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg.h | 2 +-
8
tcg/tcg.c | 10 +++++-----
9
tcg/aarch64/tcg-target.c.inc | 2 +-
10
tcg/arm/tcg-target.c.inc | 2 +-
11
tcg/i386/tcg-target.c.inc | 2 +-
12
tcg/mips/tcg-target.c.inc | 2 +-
13
tcg/ppc/tcg-target.c.inc | 2 +-
14
tcg/riscv/tcg-target.c.inc | 2 +-
15
tcg/s390/tcg-target.c.inc | 2 +-
16
tcg/sparc/tcg-target.c.inc | 2 +-
17
10 files changed, 14 insertions(+), 14 deletions(-)
18
19
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/tcg/tcg.h
22
+++ b/include/tcg/tcg.h
23
@@ -XXX,XX +XXX,XX @@ typedef uintptr_t tcg_prologue_fn(CPUArchState *env, const void *tb_ptr);
24
extern tcg_prologue_fn *tcg_qemu_tb_exec;
25
#endif
26
27
-void tcg_register_jit(void *buf, size_t buf_size);
28
+void tcg_register_jit(const void *buf, size_t buf_size);
29
30
#if TCG_TARGET_MAYBE_vec
31
/* Return zero if the tuple (opc, type, vece) is unsupportable;
32
diff --git a/tcg/tcg.c b/tcg/tcg.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/tcg.c
35
+++ b/tcg/tcg.c
36
@@ -XXX,XX +XXX,XX @@ typedef struct QEMU_PACKED {
37
DebugFrameFDEHeader fde;
38
} DebugFrameHeader;
39
40
-static void tcg_register_jit_int(void *buf, size_t size,
41
+static void tcg_register_jit_int(const void *buf, size_t size,
42
const void *debug_frame,
43
size_t debug_frame_size)
44
__attribute__((unused));
45
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
46
total_size -= prologue_size;
47
s->code_gen_buffer_size = total_size;
48
49
- tcg_register_jit(s->code_gen_buffer, total_size);
50
+ tcg_register_jit(tcg_splitwx_to_rx(s->code_gen_buffer), total_size);
51
52
#ifdef DEBUG_DISAS
53
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
54
@@ -XXX,XX +XXX,XX @@ static int find_string(const char *strtab, const char *str)
55
}
56
}
57
58
-static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
59
+static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
60
const void *debug_frame,
61
size_t debug_frame_size)
62
{
63
@@ -XXX,XX +XXX,XX @@ static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
64
/* No support for the feature. Provide the entry point expected by exec.c,
65
and implement the internal function we declared earlier. */
66
67
-static void tcg_register_jit_int(void *buf, size_t size,
68
+static void tcg_register_jit_int(const void *buf, size_t size,
69
const void *debug_frame,
70
size_t debug_frame_size)
71
{
72
}
73
74
-void tcg_register_jit(void *buf, size_t buf_size)
75
+void tcg_register_jit(const void *buf, size_t buf_size)
76
{
77
}
78
#endif /* ELF_HOST_MACHINE */
79
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
80
index XXXXXXX..XXXXXXX 100644
81
--- a/tcg/aarch64/tcg-target.c.inc
82
+++ b/tcg/aarch64/tcg-target.c.inc
83
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
84
}
85
};
86
87
-void tcg_register_jit(void *buf, size_t buf_size)
88
+void tcg_register_jit(const void *buf, size_t buf_size)
89
{
90
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
91
}
92
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
93
index XXXXXXX..XXXXXXX 100644
94
--- a/tcg/arm/tcg-target.c.inc
95
+++ b/tcg/arm/tcg-target.c.inc
96
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
97
}
98
};
99
100
-void tcg_register_jit(void *buf, size_t buf_size)
101
+void tcg_register_jit(const void *buf, size_t buf_size)
102
{
103
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
104
}
105
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
106
index XXXXXXX..XXXXXXX 100644
107
--- a/tcg/i386/tcg-target.c.inc
108
+++ b/tcg/i386/tcg-target.c.inc
109
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
110
#endif
111
112
#if defined(ELF_HOST_MACHINE)
113
-void tcg_register_jit(void *buf, size_t buf_size)
114
+void tcg_register_jit(const void *buf, size_t buf_size)
115
{
116
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
117
}
118
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
119
index XXXXXXX..XXXXXXX 100644
120
--- a/tcg/mips/tcg-target.c.inc
121
+++ b/tcg/mips/tcg-target.c.inc
122
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
123
}
124
};
125
126
-void tcg_register_jit(void *buf, size_t buf_size)
127
+void tcg_register_jit(const void *buf, size_t buf_size)
128
{
129
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
130
}
131
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
132
index XXXXXXX..XXXXXXX 100644
133
--- a/tcg/ppc/tcg-target.c.inc
134
+++ b/tcg/ppc/tcg-target.c.inc
135
@@ -XXX,XX +XXX,XX @@ static DebugFrame debug_frame = {
136
}
137
};
138
139
-void tcg_register_jit(void *buf, size_t buf_size)
140
+void tcg_register_jit(const void *buf, size_t buf_size)
141
{
142
uint8_t *p = &debug_frame.fde_reg_ofs[3];
143
int i;
144
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
145
index XXXXXXX..XXXXXXX 100644
146
--- a/tcg/riscv/tcg-target.c.inc
147
+++ b/tcg/riscv/tcg-target.c.inc
148
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
149
}
150
};
151
152
-void tcg_register_jit(void *buf, size_t buf_size)
153
+void tcg_register_jit(const void *buf, size_t buf_size)
154
{
155
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
156
}
157
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
158
index XXXXXXX..XXXXXXX 100644
159
--- a/tcg/s390/tcg-target.c.inc
160
+++ b/tcg/s390/tcg-target.c.inc
161
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
162
}
163
};
164
165
-void tcg_register_jit(void *buf, size_t buf_size)
166
+void tcg_register_jit(const void *buf, size_t buf_size)
167
{
168
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
169
}
170
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
171
index XXXXXXX..XXXXXXX 100644
172
--- a/tcg/sparc/tcg-target.c.inc
173
+++ b/tcg/sparc/tcg-target.c.inc
174
@@ -XXX,XX +XXX,XX @@ static const DebugFrame debug_frame = {
175
.fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
176
};
177
178
-void tcg_register_jit(void *buf, size_t buf_size)
179
+void tcg_register_jit(const void *buf, size_t buf_size)
180
{
181
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
182
}
183
--
184
2.25.1
Pass both rx and rw addresses to tb_target_set_jmp_target.
2
1
3
Reviewed-by: Joelle van Dyne <j@getutm.app>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/aarch64/tcg-target.h | 2 +-
7
tcg/arm/tcg-target.h | 2 +-
8
tcg/i386/tcg-target.h | 6 +++---
9
tcg/mips/tcg-target.h | 2 +-
10
tcg/ppc/tcg-target.h | 2 +-
11
tcg/riscv/tcg-target.h | 2 +-
12
tcg/s390/tcg-target.h | 8 ++++----
13
tcg/sparc/tcg-target.h | 2 +-
14
tcg/tci/tcg-target.h | 6 +++---
15
accel/tcg/cpu-exec.c | 4 +++-
16
tcg/aarch64/tcg-target.c.inc | 12 ++++++------
17
tcg/mips/tcg-target.c.inc | 8 ++++----
18
tcg/ppc/tcg-target.c.inc | 16 ++++++++--------
19
tcg/sparc/tcg-target.c.inc | 14 +++++++-------
20
14 files changed, 44 insertions(+), 42 deletions(-)
21
22
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/aarch64/tcg-target.h
25
+++ b/tcg/aarch64/tcg-target.h
26
@@ -XXX,XX +XXX,XX @@ typedef enum {
27
#define TCG_TARGET_DEFAULT_MO (0)
28
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
29
30
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
31
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
32
33
#ifdef CONFIG_SOFTMMU
34
#define TCG_TARGET_NEED_LDST_LABELS
35
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/tcg/arm/tcg-target.h
38
+++ b/tcg/arm/tcg-target.h
39
@@ -XXX,XX +XXX,XX @@ enum {
40
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
41
42
/* not defined -- call should be eliminated at compile time */
43
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
44
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
45
46
#ifdef CONFIG_SOFTMMU
47
#define TCG_TARGET_NEED_LDST_LABELS
48
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
49
index XXXXXXX..XXXXXXX 100644
50
--- a/tcg/i386/tcg-target.h
51
+++ b/tcg/i386/tcg-target.h
52
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
53
#define TCG_TARGET_extract_i64_valid(ofs, len) \
54
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
55
56
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
57
- uintptr_t jmp_addr, uintptr_t addr)
58
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
59
+ uintptr_t jmp_rw, uintptr_t addr)
60
{
61
/* patch the branch destination */
62
- qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
63
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
64
/* no need to flush icache explicitly */
65
}
66
67
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
68
index XXXXXXX..XXXXXXX 100644
69
--- a/tcg/mips/tcg-target.h
70
+++ b/tcg/mips/tcg-target.h
71
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
72
#define TCG_TARGET_DEFAULT_MO (0)
73
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
74
75
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
76
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
77
78
#ifdef CONFIG_SOFTMMU
79
#define TCG_TARGET_NEED_LDST_LABELS
80
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
81
index XXXXXXX..XXXXXXX 100644
82
--- a/tcg/ppc/tcg-target.h
83
+++ b/tcg/ppc/tcg-target.h
84
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
85
#define TCG_TARGET_HAS_bitsel_vec have_vsx
86
#define TCG_TARGET_HAS_cmpsel_vec 0
87
88
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
89
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
90
91
#define TCG_TARGET_DEFAULT_MO (0)
92
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
93
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
94
index XXXXXXX..XXXXXXX 100644
95
--- a/tcg/riscv/tcg-target.h
96
+++ b/tcg/riscv/tcg-target.h
97
@@ -XXX,XX +XXX,XX @@ typedef enum {
98
#endif
99
100
/* not defined -- call should be eliminated at compile time */
101
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
102
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
103
104
#define TCG_TARGET_DEFAULT_MO (0)
105
106
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
107
index XXXXXXX..XXXXXXX 100644
108
--- a/tcg/s390/tcg-target.h
109
+++ b/tcg/s390/tcg-target.h
110
@@ -XXX,XX +XXX,XX @@ enum {
111
TCG_AREG0 = TCG_REG_R10,
112
};
113
114
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
115
- uintptr_t jmp_addr, uintptr_t addr)
116
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
117
+ uintptr_t jmp_rw, uintptr_t addr)
118
{
119
/* patch the branch destination */
120
- intptr_t disp = addr - (jmp_addr - 2);
121
- qatomic_set((int32_t *)jmp_addr, disp / 2);
122
+ intptr_t disp = addr - (jmp_rx - 2);
123
+ qatomic_set((int32_t *)jmp_rw, disp / 2);
124
/* no need to flush icache explicitly */
125
}
126
127
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
128
index XXXXXXX..XXXXXXX 100644
129
--- a/tcg/sparc/tcg-target.h
130
+++ b/tcg/sparc/tcg-target.h
131
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
132
#define TCG_TARGET_DEFAULT_MO (0)
133
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
134
135
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
136
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
137
138
#define TCG_TARGET_NEED_POOL_LABELS
139
140
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/tci/tcg-target.h
143
+++ b/tcg/tci/tcg-target.h
144
@@ -XXX,XX +XXX,XX @@ void tci_disas(uint8_t opc);
145
146
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
147
148
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
149
- uintptr_t jmp_addr, uintptr_t addr)
150
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
151
+ uintptr_t jmp_rw, uintptr_t addr)
152
{
153
/* patch the branch destination */
154
- qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
155
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
156
/* no need to flush icache explicitly */
157
}
158
159
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/accel/tcg/cpu-exec.c
162
+++ b/accel/tcg/cpu-exec.c
163
@@ -XXX,XX +XXX,XX @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
164
if (TCG_TARGET_HAS_direct_jump) {
165
uintptr_t offset = tb->jmp_target_arg[n];
166
uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
167
- tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
168
+ uintptr_t jmp_rx = tc_ptr + offset;
169
+ uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
170
+ tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
171
} else {
172
tb->jmp_target_arg[n] = addr;
173
}
174
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
175
index XXXXXXX..XXXXXXX 100644
176
--- a/tcg/aarch64/tcg-target.c.inc
177
+++ b/tcg/aarch64/tcg-target.c.inc
178
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
179
}
180
}
181
182
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
183
- uintptr_t addr)
184
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
185
+ uintptr_t jmp_rw, uintptr_t addr)
186
{
187
tcg_insn_unit i1, i2;
188
TCGType rt = TCG_TYPE_I64;
189
TCGReg rd = TCG_REG_TMP;
190
uint64_t pair;
191
192
- ptrdiff_t offset = addr - jmp_addr;
193
+ ptrdiff_t offset = addr - jmp_rx;
194
195
if (offset == sextract64(offset, 0, 26)) {
196
i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
197
i2 = NOP;
198
} else {
199
- offset = (addr >> 12) - (jmp_addr >> 12);
200
+ offset = (addr >> 12) - (jmp_rx >> 12);
201
202
/* patch ADRP */
203
i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
204
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
205
i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
206
}
207
pair = (uint64_t)i2 << 32 | i1;
208
- qatomic_set((uint64_t *)jmp_addr, pair);
209
- flush_idcache_range(jmp_addr, jmp_addr, 8);
210
+ qatomic_set((uint64_t *)jmp_rw, pair);
211
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
212
}
213
214
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
215
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
216
index XXXXXXX..XXXXXXX 100644
217
--- a/tcg/mips/tcg-target.c.inc
218
+++ b/tcg/mips/tcg-target.c.inc
219
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
220
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
221
}
222
223
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
224
- uintptr_t addr)
225
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
226
+ uintptr_t jmp_rw, uintptr_t addr)
227
{
228
- qatomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
229
- flush_idcache_range(jmp_addr, jmp_addr, 4);
230
+ qatomic_set((uint32_t *)jmp_rw, deposit32(OPC_J, 0, 26, addr >> 2));
231
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
232
}
233
234
typedef struct {
235
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
236
index XXXXXXX..XXXXXXX 100644
237
--- a/tcg/ppc/tcg-target.c.inc
238
+++ b/tcg/ppc/tcg-target.c.inc
239
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
240
tcg_out32(s, insn);
241
}
242
243
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
244
- uintptr_t addr)
245
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
246
+ uintptr_t jmp_rw, uintptr_t addr)
247
{
248
if (TCG_TARGET_REG_BITS == 64) {
249
tcg_insn_unit i1, i2;
250
intptr_t tb_diff = addr - tc_ptr;
251
- intptr_t br_diff = addr - (jmp_addr + 4);
252
+ intptr_t br_diff = addr - (jmp_rx + 4);
253
uint64_t pair;
254
255
/* This does not exercise the range of the branch, but we do
256
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
257
258
/* As per the enclosing if, this is ppc64. Avoid the _Static_assert
259
within qatomic_set that would fail to build a ppc32 host. */
260
- qatomic_set__nocheck((uint64_t *)jmp_addr, pair);
261
- flush_idcache_range(jmp_addr, jmp_addr, 8);
262
+ qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
263
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
264
} else {
265
- intptr_t diff = addr - jmp_addr;
266
+ intptr_t diff = addr - jmp_rx;
267
tcg_debug_assert(in_range_b(diff));
268
- qatomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
269
- flush_idcache_range(jmp_addr, jmp_addr, 4);
270
+ qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
271
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
272
}
273
}
274
275
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
276
index XXXXXXX..XXXXXXX 100644
277
--- a/tcg/sparc/tcg-target.c.inc
278
+++ b/tcg/sparc/tcg-target.c.inc
279
@@ -XXX,XX +XXX,XX @@ void tcg_register_jit(const void *buf, size_t buf_size)
280
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
281
}
282
283
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
284
- uintptr_t addr)
285
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
286
+ uintptr_t jmp_rw, uintptr_t addr)
287
{
288
intptr_t tb_disp = addr - tc_ptr;
289
- intptr_t br_disp = addr - jmp_addr;
290
+ intptr_t br_disp = addr - jmp_rx;
291
tcg_insn_unit i1, i2;
292
293
/* We can reach the entire address space for ILP32.
294
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
295
tcg_debug_assert(br_disp == (int32_t)br_disp);
296
297
if (!USE_REG_TB) {
298
- qatomic_set((uint32_t *)jmp_addr,
299
+ qatomic_set((uint32_t *)jmp_rw,
300
         deposit32(CALL, 0, 30, br_disp >> 2));
301
- flush_idcache_range(jmp_addr, jmp_addr, 4);
302
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
303
return;
304
}
305
306
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
307
| INSN_IMM13((tb_disp & 0x3ff) | -0x400));
308
}
309
310
- qatomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
311
- flush_idcache_range(jmp_addr, jmp_addr, 8);
312
+ qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
313
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
314
}
315
--
316
2.25.1
317
318
Deleted patch
1
There is nothing within the translators that ought to be
2
changing the TranslationBlock data, so make it const.
3
1
4
This does not actually use the read-only copy of the
5
data structure that exists within the rx region.
6
7
Reviewed-by: Joelle van Dyne <j@getutm.app>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
include/exec/gen-icount.h | 4 ++--
12
include/exec/translator.h | 2 +-
13
include/tcg/tcg-op.h | 2 +-
14
accel/tcg/translator.c | 4 ++--
15
target/arm/translate-a64.c | 2 +-
16
tcg/tcg-op.c | 2 +-
17
6 files changed, 8 insertions(+), 8 deletions(-)
18
19
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/exec/gen-icount.h
22
+++ b/include/exec/gen-icount.h
23
@@ -XXX,XX +XXX,XX @@ static inline void gen_io_end(void)
24
tcg_temp_free_i32(tmp);
25
}
26
27
-static inline void gen_tb_start(TranslationBlock *tb)
28
+static inline void gen_tb_start(const TranslationBlock *tb)
29
{
30
TCGv_i32 count, imm;
31
32
@@ -XXX,XX +XXX,XX @@ static inline void gen_tb_start(TranslationBlock *tb)
33
tcg_temp_free_i32(count);
34
}
35
36
-static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
37
+static inline void gen_tb_end(const TranslationBlock *tb, int num_insns)
38
{
39
if (tb_cflags(tb) & CF_USE_ICOUNT) {
40
/* Update the num_insn immediate parameter now that we know
41
diff --git a/include/exec/translator.h b/include/exec/translator.h
42
index XXXXXXX..XXXXXXX 100644
43
--- a/include/exec/translator.h
44
+++ b/include/exec/translator.h
45
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
46
* Architecture-agnostic disassembly context.
47
*/
48
typedef struct DisasContextBase {
49
- TranslationBlock *tb;
50
+ const TranslationBlock *tb;
51
target_ulong pc_first;
52
target_ulong pc_next;
53
DisasJumpType is_jmp;
54
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
55
index XXXXXXX..XXXXXXX 100644
56
--- a/include/tcg/tcg-op.h
57
+++ b/include/tcg/tcg-op.h
58
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
59
* be NULL and @idx should be 0. Otherwise, @tb should be valid and
60
* @idx should be one of the TB_EXIT_ values.
61
*/
62
-void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx);
63
+void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx);
64
65
/**
66
* tcg_gen_goto_tb() - output goto_tb TCG operation
67
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/accel/tcg/translator.c
70
+++ b/accel/tcg/translator.c
71
@@ -XXX,XX +XXX,XX @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
72
}
73
74
/* The disas_log hook may use these values rather than recompute. */
75
- db->tb->size = db->pc_next - db->pc_first;
76
- db->tb->icount = db->num_insns;
77
+ tb->size = db->pc_next - db->pc_first;
78
+ tb->icount = db->num_insns;
79
80
#ifdef DEBUG_DISAS
81
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
82
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/target/arm/translate-a64.c
85
+++ b/target/arm/translate-a64.c
86
@@ -XXX,XX +XXX,XX @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
87
88
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
89
{
90
- TranslationBlock *tb;
91
+ const TranslationBlock *tb;
92
93
tb = s->base.tb;
94
if (use_goto_tb(s, n, dest)) {
95
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
96
index XXXXXXX..XXXXXXX 100644
97
--- a/tcg/tcg-op.c
98
+++ b/tcg/tcg-op.c
99
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
100
101
/* QEMU specific operations. */
102
103
-void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
104
+void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
105
{
106
uintptr_t val = (uintptr_t)tb + idx;
107
108
--
109
2.25.1
110
111
Deleted patch
1
There is nothing within the translators that ought to be
2
changing the TranslationBlock data, so make it const.
3
1
4
This does not actually use the read-only copy of the
5
data structure that exists within the rx region.
6
7
Reviewed-by: Joelle van Dyne <j@getutm.app>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
include/hw/core/cpu.h | 3 ++-
12
target/arm/cpu.c | 3 ++-
13
target/avr/cpu.c | 3 ++-
14
target/hppa/cpu.c | 3 ++-
15
target/i386/tcg/tcg-cpu.c | 3 ++-
16
target/microblaze/cpu.c | 3 ++-
17
target/mips/cpu.c | 3 ++-
18
target/riscv/cpu.c | 3 ++-
19
target/rx/cpu.c | 3 ++-
20
target/sh4/cpu.c | 3 ++-
21
target/sparc/cpu.c | 3 ++-
22
target/tricore/cpu.c | 2 +-
23
12 files changed, 23 insertions(+), 12 deletions(-)
24
25
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/include/hw/core/cpu.h
28
+++ b/include/hw/core/cpu.h
29
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
30
void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
31
Error **errp);
32
void (*set_pc)(CPUState *cpu, vaddr value);
33
- void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
34
+ void (*synchronize_from_tb)(CPUState *cpu,
35
+ const struct TranslationBlock *tb);
36
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
37
MMUAccessType access_type, int mmu_idx,
38
bool probe, uintptr_t retaddr);
39
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/arm/cpu.c
42
+++ b/target/arm/cpu.c
43
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
44
}
45
}
46
47
-static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
48
+static void arm_cpu_synchronize_from_tb(CPUState *cs,
49
+ const TranslationBlock *tb)
50
{
51
ARMCPU *cpu = ARM_CPU(cs);
52
CPUARMState *env = &cpu->env;
53
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/target/avr/cpu.c
56
+++ b/target/avr/cpu.c
57
@@ -XXX,XX +XXX,XX @@ static bool avr_cpu_has_work(CPUState *cs)
58
&& cpu_interrupts_enabled(env);
59
}
60
61
-static void avr_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
62
+static void avr_cpu_synchronize_from_tb(CPUState *cs,
63
+ const TranslationBlock *tb)
64
{
65
AVRCPU *cpu = AVR_CPU(cs);
66
CPUAVRState *env = &cpu->env;
67
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/hppa/cpu.c
70
+++ b/target/hppa/cpu.c
71
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
72
cpu->env.iaoq_b = value + 4;
73
}
74
75
-static void hppa_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
76
+static void hppa_cpu_synchronize_from_tb(CPUState *cs,
77
+ const TranslationBlock *tb)
78
{
79
HPPACPU *cpu = HPPA_CPU(cs);
80
81
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
82
index XXXXXXX..XXXXXXX 100644
83
--- a/target/i386/tcg/tcg-cpu.c
84
+++ b/target/i386/tcg/tcg-cpu.c
85
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_exec_exit(CPUState *cs)
86
env->eflags = cpu_compute_eflags(env);
87
}
88
89
-static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
90
+static void x86_cpu_synchronize_from_tb(CPUState *cs,
91
+ const TranslationBlock *tb)
92
{
93
X86CPU *cpu = X86_CPU(cs);
94
95
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
96
index XXXXXXX..XXXXXXX 100644
97
--- a/target/microblaze/cpu.c
98
+++ b/target/microblaze/cpu.c
99
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value)
100
cpu->env.iflags = 0;
101
}
102
103
-static void mb_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
104
+static void mb_cpu_synchronize_from_tb(CPUState *cs,
105
+ const TranslationBlock *tb)
106
{
107
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
108
109
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
110
index XXXXXXX..XXXXXXX 100644
111
--- a/target/mips/cpu.c
112
+++ b/target/mips/cpu.c
113
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value)
114
}
115
}
116
117
-static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
118
+static void mips_cpu_synchronize_from_tb(CPUState *cs,
119
+ const TranslationBlock *tb)
120
{
121
MIPSCPU *cpu = MIPS_CPU(cs);
122
CPUMIPSState *env = &cpu->env;
123
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/target/riscv/cpu.c
126
+++ b/target/riscv/cpu.c
127
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
128
env->pc = value;
129
}
130
131
-static void riscv_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
132
+static void riscv_cpu_synchronize_from_tb(CPUState *cs,
133
+ const TranslationBlock *tb)
134
{
135
RISCVCPU *cpu = RISCV_CPU(cs);
136
CPURISCVState *env = &cpu->env;
137
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
138
index XXXXXXX..XXXXXXX 100644
139
--- a/target/rx/cpu.c
140
+++ b/target/rx/cpu.c
141
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_set_pc(CPUState *cs, vaddr value)
142
cpu->env.pc = value;
143
}
144
145
-static void rx_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
146
+static void rx_cpu_synchronize_from_tb(CPUState *cs,
147
+ const TranslationBlock *tb)
148
{
149
RXCPU *cpu = RX_CPU(cs);
150
151
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
152
index XXXXXXX..XXXXXXX 100644
153
--- a/target/sh4/cpu.c
154
+++ b/target/sh4/cpu.c
155
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_set_pc(CPUState *cs, vaddr value)
156
cpu->env.pc = value;
157
}
158
159
-static void superh_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
160
+static void superh_cpu_synchronize_from_tb(CPUState *cs,
161
+ const TranslationBlock *tb)
162
{
163
SuperHCPU *cpu = SUPERH_CPU(cs);
164
165
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
166
index XXXXXXX..XXXXXXX 100644
167
--- a/target/sparc/cpu.c
168
+++ b/target/sparc/cpu.c
169
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
170
cpu->env.npc = value + 4;
171
}
172
173
-static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
174
+static void sparc_cpu_synchronize_from_tb(CPUState *cs,
175
+ const TranslationBlock *tb)
176
{
177
SPARCCPU *cpu = SPARC_CPU(cs);
178
179
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
180
index XXXXXXX..XXXXXXX 100644
181
--- a/target/tricore/cpu.c
182
+++ b/target/tricore/cpu.c
183
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
184
}
185
186
static void tricore_cpu_synchronize_from_tb(CPUState *cs,
187
- TranslationBlock *tb)
188
+ const TranslationBlock *tb)
189
{
190
TriCoreCPU *cpu = TRICORE_CPU(cs);
191
CPUTriCoreState *env = &cpu->env;
192
--
193
2.25.1
194
195
Deleted patch
1
Report better error messages than just "could not allocate".
2
Let alloc_code_gen_buffer set ctx->code_gen_buffer_size
3
and ctx->code_gen_buffer, and simply return bool.
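The new shape follows QEMU's usual convention: return bool for success and report the reason through an Error ** out-parameter, so tcg_exec_init can pass &error_fatal and a failure still terminates, only now with a useful message. A minimal standalone sketch of the same pattern, substituting a plain string for QEMU's Error type (alloc_buffer and the message text are illustrative, not taken from the patch):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for the bool-plus-out-error convention. */
    static bool alloc_buffer(size_t size, void **buf_out, char **errp)
    {
        void *buf = malloc(size);
        if (buf == NULL) {
            /* Say *why* it failed, not just "could not allocate". */
            char *msg = malloc(128);
            if (msg) {
                snprintf(msg, 128, "allocate %zu bytes for jit buffer: %s",
                         size, strerror(errno));
            }
            *errp = msg;
            return false;
        }
        *buf_out = buf;
        return true;
    }

    int main(void)
    {
        void *buf;
        char *err = NULL;

        if (!alloc_buffer(64 * 1024, &buf, &err)) {
            fprintf(stderr, "%s\n", err ? err : "could not allocate");
            free(err);
            return EXIT_FAILURE;
        }
        free(buf);
        return EXIT_SUCCESS;
    }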
4
1
5
Reviewed-by: Joelle van Dyne <j@getutm.app>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
accel/tcg/translate-all.c | 60 ++++++++++++++++++++++-----------------
10
1 file changed, 34 insertions(+), 26 deletions(-)
11
12
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/translate-all.c
15
+++ b/accel/tcg/translate-all.c
16
@@ -XXX,XX +XXX,XX @@
17
#include "sysemu/cpus.h"
18
#include "sysemu/cpu-timers.h"
19
#include "sysemu/tcg.h"
20
+#include "qapi/error.h"
21
22
/* #define DEBUG_TB_INVALIDATE */
23
/* #define DEBUG_TB_FLUSH */
24
@@ -XXX,XX +XXX,XX @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
25
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
26
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
27
28
-static inline size_t size_code_gen_buffer(size_t tb_size)
29
+static size_t size_code_gen_buffer(size_t tb_size)
30
{
31
/* Size the buffer. */
32
if (tb_size == 0) {
33
@@ -XXX,XX +XXX,XX @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
34
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
35
__attribute__((aligned(CODE_GEN_ALIGN)));
36
37
-static inline void *alloc_code_gen_buffer(void)
38
+static bool alloc_code_gen_buffer(size_t tb_size, Error **errp)
39
{
40
void *buf = static_code_gen_buffer;
41
void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
42
@@ -XXX,XX +XXX,XX @@ static inline void *alloc_code_gen_buffer(void)
43
size = end - buf;
44
45
/* Honor a command-line option limiting the size of the buffer. */
46
- if (size > tcg_ctx->code_gen_buffer_size) {
47
- size = QEMU_ALIGN_DOWN(tcg_ctx->code_gen_buffer_size,
48
- qemu_real_host_page_size);
49
+ if (size > tb_size) {
50
+ size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
51
}
52
tcg_ctx->code_gen_buffer_size = size;
53
54
@@ -XXX,XX +XXX,XX @@ static inline void *alloc_code_gen_buffer(void)
55
#endif
56
57
if (qemu_mprotect_rwx(buf, size)) {
58
- abort();
59
+ error_setg_errno(errp, errno, "mprotect of jit buffer");
60
+ return false;
61
}
62
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
63
64
- return buf;
65
+ tcg_ctx->code_gen_buffer = buf;
66
+ return true;
67
}
68
#elif defined(_WIN32)
69
-static inline void *alloc_code_gen_buffer(void)
70
+static bool alloc_code_gen_buffer(size_t size, Error **errp)
71
{
72
- size_t size = tcg_ctx->code_gen_buffer_size;
73
- return VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
74
- PAGE_EXECUTE_READWRITE);
75
+ void *buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
76
+ PAGE_EXECUTE_READWRITE);
77
+ if (buf == NULL) {
78
+ error_setg_win32(errp, GetLastError(),
79
+ "allocate %zu bytes for jit buffer", size);
80
+ return false;
81
+ }
82
+
83
+ tcg_ctx->code_gen_buffer = buf;
84
+ tcg_ctx->code_gen_buffer_size = size;
85
+ return true;
86
}
87
#else
88
-static inline void *alloc_code_gen_buffer(void)
89
+static bool alloc_code_gen_buffer(size_t size, Error **errp)
90
{
91
int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
92
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
93
- size_t size = tcg_ctx->code_gen_buffer_size;
94
void *buf;
95
96
buf = mmap(NULL, size, prot, flags, -1, 0);
97
if (buf == MAP_FAILED) {
98
- return NULL;
99
+ error_setg_errno(errp, errno,
100
+ "allocate %zu bytes for jit buffer", size);
101
+ return false;
102
}
103
+ tcg_ctx->code_gen_buffer_size = size;
104
105
#ifdef __mips__
106
if (cross_256mb(buf, size)) {
107
@@ -XXX,XX +XXX,XX @@ static inline void *alloc_code_gen_buffer(void)
108
/* Request large pages for the buffer. */
109
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
110
111
- return buf;
112
+ tcg_ctx->code_gen_buffer = buf;
113
+ return true;
114
}
115
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
116
117
-static inline void code_gen_alloc(size_t tb_size)
118
-{
119
- tcg_ctx->code_gen_buffer_size = size_code_gen_buffer(tb_size);
120
- tcg_ctx->code_gen_buffer = alloc_code_gen_buffer();
121
- if (tcg_ctx->code_gen_buffer == NULL) {
122
- fprintf(stderr, "Could not allocate dynamic translator buffer\n");
123
- exit(1);
124
- }
125
-}
126
-
127
static bool tb_cmp(const void *ap, const void *bp)
128
{
129
const TranslationBlock *a = ap;
130
@@ -XXX,XX +XXX,XX @@ static void tb_htable_init(void)
131
size. */
132
void tcg_exec_init(unsigned long tb_size)
133
{
134
+ bool ok;
135
+
136
tcg_allowed = true;
137
cpu_gen_init();
138
page_init();
139
tb_htable_init();
140
- code_gen_alloc(tb_size);
141
+
142
+ ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size), &error_fatal);
143
+ assert(ok);
144
+
145
#if defined(CONFIG_SOFTMMU)
146
/* There's no guest base to take into account, so go ahead and
147
initialize the prologue now. */
148
--
149
2.25.1
150
151
Deleted patch
1
Plumb the value through to alloc_code_gen_buffer. This is not
2
supported by any os or tcg backend, so for now enabling it will
3
result in an error.
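The splitwx argument is an int rather than a bool so that it can carry three states: force-on (1), off (0), and auto (-1, the default when CONFIG_DEBUG_TCG is set), which later patches in the series use to fall back silently when the host cannot provide a split mapping. A standalone sketch of that convention (use_splitwx is a made-up helper, not something added by this patch):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * splitwx: 1 = force on (caller reports an error if unsupported),
     *          0 = off, -1 = auto (use it only when the host supports it).
     */
    static bool use_splitwx(int splitwx, bool host_supported)
    {
        if (splitwx > 0) {
            return true;
        }
        if (splitwx < 0) {
            return host_supported;
        }
        return false;
    }

    int main(void)
    {
        printf("auto, supported:   %d\n", use_splitwx(-1, true));
        printf("auto, unsupported: %d\n", use_splitwx(-1, false));
        printf("forced on:         %d\n", use_splitwx(1, false));
        return 0;
    }

On the command line the property surfaces as -accel tcg,split-wx=on|off, per the qemu-options.hx hunk at the end of this patch.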
4
1
5
Reviewed-by: Joelle van Dyne <j@getutm.app>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
include/sysemu/tcg.h | 3 ++-
9
tcg/aarch64/tcg-target.h | 1 +
10
tcg/arm/tcg-target.h | 1 +
11
tcg/i386/tcg-target.h | 1 +
12
tcg/mips/tcg-target.h | 1 +
13
tcg/ppc/tcg-target.h | 1 +
14
tcg/riscv/tcg-target.h | 1 +
15
tcg/s390/tcg-target.h | 1 +
16
tcg/sparc/tcg-target.h | 1 +
17
tcg/tci/tcg-target.h | 1 +
18
accel/tcg/tcg-all.c | 26 +++++++++++++++++++++++++-
19
accel/tcg/translate-all.c | 35 +++++++++++++++++++++++++++--------
20
bsd-user/main.c | 2 +-
21
linux-user/main.c | 2 +-
22
qemu-options.hx | 7 +++++++
23
15 files changed, 72 insertions(+), 12 deletions(-)
24
25
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/include/sysemu/tcg.h
28
+++ b/include/sysemu/tcg.h
29
@@ -XXX,XX +XXX,XX @@
30
#ifndef SYSEMU_TCG_H
31
#define SYSEMU_TCG_H
32
33
-void tcg_exec_init(unsigned long tb_size);
34
+void tcg_exec_init(unsigned long tb_size, int splitwx);
35
+
36
#ifdef CONFIG_TCG
37
extern bool tcg_allowed;
38
#define tcg_enabled() (tcg_allowed)
39
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/tcg/aarch64/tcg-target.h
42
+++ b/tcg/aarch64/tcg-target.h
43
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
44
#define TCG_TARGET_NEED_LDST_LABELS
45
#endif
46
#define TCG_TARGET_NEED_POOL_LABELS
47
+#define TCG_TARGET_SUPPORT_MIRROR 0
48
49
#endif /* AARCH64_TCG_TARGET_H */
50
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
51
index XXXXXXX..XXXXXXX 100644
52
--- a/tcg/arm/tcg-target.h
53
+++ b/tcg/arm/tcg-target.h
54
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
55
#define TCG_TARGET_NEED_LDST_LABELS
56
#endif
57
#define TCG_TARGET_NEED_POOL_LABELS
58
+#define TCG_TARGET_SUPPORT_MIRROR 0
59
60
#endif
61
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
62
index XXXXXXX..XXXXXXX 100644
63
--- a/tcg/i386/tcg-target.h
64
+++ b/tcg/i386/tcg-target.h
65
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
66
#define TCG_TARGET_NEED_LDST_LABELS
67
#endif
68
#define TCG_TARGET_NEED_POOL_LABELS
69
+#define TCG_TARGET_SUPPORT_MIRROR 0
70
71
#endif
72
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
73
index XXXXXXX..XXXXXXX 100644
74
--- a/tcg/mips/tcg-target.h
75
+++ b/tcg/mips/tcg-target.h
76
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
77
78
#define TCG_TARGET_DEFAULT_MO (0)
79
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
80
+#define TCG_TARGET_SUPPORT_MIRROR 0
81
82
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
83
84
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
85
index XXXXXXX..XXXXXXX 100644
86
--- a/tcg/ppc/tcg-target.h
87
+++ b/tcg/ppc/tcg-target.h
88
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
89
#define TCG_TARGET_NEED_LDST_LABELS
90
#endif
91
#define TCG_TARGET_NEED_POOL_LABELS
92
+#define TCG_TARGET_SUPPORT_MIRROR 0
93
94
#endif
95
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
96
index XXXXXXX..XXXXXXX 100644
97
--- a/tcg/riscv/tcg-target.h
98
+++ b/tcg/riscv/tcg-target.h
99
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
100
#define TCG_TARGET_NEED_POOL_LABELS
101
102
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
103
+#define TCG_TARGET_SUPPORT_MIRROR 0
104
105
#endif
106
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
107
index XXXXXXX..XXXXXXX 100644
108
--- a/tcg/s390/tcg-target.h
109
+++ b/tcg/s390/tcg-target.h
110
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
111
#define TCG_TARGET_NEED_LDST_LABELS
112
#endif
113
#define TCG_TARGET_NEED_POOL_LABELS
114
+#define TCG_TARGET_SUPPORT_MIRROR 0
115
116
#endif
117
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
118
index XXXXXXX..XXXXXXX 100644
119
--- a/tcg/sparc/tcg-target.h
120
+++ b/tcg/sparc/tcg-target.h
121
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
122
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
123
124
#define TCG_TARGET_NEED_POOL_LABELS
125
+#define TCG_TARGET_SUPPORT_MIRROR 0
126
127
#endif
128
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
129
index XXXXXXX..XXXXXXX 100644
130
--- a/tcg/tci/tcg-target.h
131
+++ b/tcg/tci/tcg-target.h
132
@@ -XXX,XX +XXX,XX @@ void tci_disas(uint8_t opc);
133
#define TCG_TARGET_DEFAULT_MO (0)
134
135
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
136
+#define TCG_TARGET_SUPPORT_MIRROR 0
137
138
static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
139
uintptr_t jmp_rw, uintptr_t addr)
140
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
141
index XXXXXXX..XXXXXXX 100644
142
--- a/accel/tcg/tcg-all.c
143
+++ b/accel/tcg/tcg-all.c
144
@@ -XXX,XX +XXX,XX @@ struct TCGState {
145
AccelState parent_obj;
146
147
bool mttcg_enabled;
148
+ int splitwx_enabled;
149
unsigned long tb_size;
150
};
151
typedef struct TCGState TCGState;
152
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_instance_init(Object *obj)
153
TCGState *s = TCG_STATE(obj);
154
155
s->mttcg_enabled = default_mttcg_enabled();
156
+
157
+ /* If debugging enabled, default "auto on", otherwise off. */
158
+#ifdef CONFIG_DEBUG_TCG
159
+ s->splitwx_enabled = -1;
160
+#else
161
+ s->splitwx_enabled = 0;
162
+#endif
163
}
164
165
bool mttcg_enabled;
166
@@ -XXX,XX +XXX,XX @@ static int tcg_init(MachineState *ms)
167
{
168
TCGState *s = TCG_STATE(current_accel());
169
170
- tcg_exec_init(s->tb_size * 1024 * 1024);
171
+ tcg_exec_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
172
mttcg_enabled = s->mttcg_enabled;
173
174
/*
175
@@ -XXX,XX +XXX,XX @@ static void tcg_set_tb_size(Object *obj, Visitor *v,
176
s->tb_size = value;
177
}
178
179
+static bool tcg_get_splitwx(Object *obj, Error **errp)
180
+{
181
+ TCGState *s = TCG_STATE(obj);
182
+ return s->splitwx_enabled;
183
+}
184
+
185
+static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
186
+{
187
+ TCGState *s = TCG_STATE(obj);
188
+ s->splitwx_enabled = value;
189
+}
190
+
191
static void tcg_accel_class_init(ObjectClass *oc, void *data)
192
{
193
AccelClass *ac = ACCEL_CLASS(oc);
194
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
195
object_class_property_set_description(oc, "tb-size",
196
"TCG translation block cache size");
197
198
+ object_class_property_add_bool(oc, "split-wx",
199
+ tcg_get_splitwx, tcg_set_splitwx);
200
+ object_class_property_set_description(oc, "split-wx",
201
+ "Map jit pages into separate RW and RX regions");
202
}
203
204
static const TypeInfo tcg_accel_type = {
205
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
206
index XXXXXXX..XXXXXXX 100644
207
--- a/accel/tcg/translate-all.c
208
+++ b/accel/tcg/translate-all.c
209
@@ -XXX,XX +XXX,XX @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
210
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
211
__attribute__((aligned(CODE_GEN_ALIGN)));
212
213
-static bool alloc_code_gen_buffer(size_t tb_size, Error **errp)
214
+static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
215
{
216
- void *buf = static_code_gen_buffer;
217
- void *end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
218
+ void *buf, *end;
219
size_t size;
220
221
+ if (splitwx > 0) {
222
+ error_setg(errp, "jit split-wx not supported");
223
+ return false;
224
+ }
225
+
226
/* page-align the beginning and end of the buffer */
227
+ buf = static_code_gen_buffer;
228
+ end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
229
buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
230
end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
231
232
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, Error **errp)
233
return true;
234
}
235
#elif defined(_WIN32)
236
-static bool alloc_code_gen_buffer(size_t size, Error **errp)
237
+static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
238
{
239
- void *buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
240
+ void *buf;
241
+
242
+ if (splitwx > 0) {
243
+ error_setg(errp, "jit split-wx not supported");
244
+ return false;
245
+ }
246
+
247
+ buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
248
PAGE_EXECUTE_READWRITE);
249
if (buf == NULL) {
250
error_setg_win32(errp, GetLastError(),
251
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, Error **errp)
252
return true;
253
}
254
#else
255
-static bool alloc_code_gen_buffer(size_t size, Error **errp)
256
+static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
257
{
258
int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
259
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
260
void *buf;
261
262
+ if (splitwx > 0) {
263
+ error_setg(errp, "jit split-wx not supported");
264
+ return false;
265
+ }
266
+
267
buf = mmap(NULL, size, prot, flags, -1, 0);
268
if (buf == MAP_FAILED) {
269
error_setg_errno(errp, errno,
270
@@ -XXX,XX +XXX,XX @@ static void tb_htable_init(void)
271
/* Must be called before using the QEMU cpus. 'tb_size' is the size
272
(in bytes) allocated to the translation buffer. Zero means default
273
size. */
274
-void tcg_exec_init(unsigned long tb_size)
275
+void tcg_exec_init(unsigned long tb_size, int splitwx)
276
{
277
bool ok;
278
279
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size)
280
page_init();
281
tb_htable_init();
282
283
- ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size), &error_fatal);
284
+ ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
285
+ splitwx, &error_fatal);
286
assert(ok);
287
288
#if defined(CONFIG_SOFTMMU)
289
diff --git a/bsd-user/main.c b/bsd-user/main.c
290
index XXXXXXX..XXXXXXX 100644
291
--- a/bsd-user/main.c
292
+++ b/bsd-user/main.c
293
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
294
}
295
296
/* init tcg before creating CPUs and to get qemu_host_page_size */
297
- tcg_exec_init(0);
298
+ tcg_exec_init(0, false);
299
300
cpu_type = parse_cpu_option(cpu_model);
301
cpu = cpu_create(cpu_type);
302
diff --git a/linux-user/main.c b/linux-user/main.c
303
index XXXXXXX..XXXXXXX 100644
304
--- a/linux-user/main.c
305
+++ b/linux-user/main.c
306
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
307
cpu_type = parse_cpu_option(cpu_model);
308
309
/* init tcg before creating CPUs and to get qemu_host_page_size */
310
- tcg_exec_init(0);
311
+ tcg_exec_init(0, false);
312
313
cpu = cpu_create(cpu_type);
314
env = cpu->env_ptr;
315
diff --git a/qemu-options.hx b/qemu-options.hx
316
index XXXXXXX..XXXXXXX 100644
317
--- a/qemu-options.hx
318
+++ b/qemu-options.hx
319
@@ -XXX,XX +XXX,XX @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel,
320
" igd-passthru=on|off (enable Xen integrated Intel graphics passthrough, default=off)\n"
321
" kernel-irqchip=on|off|split controls accelerated irqchip support (default=on)\n"
322
" kvm-shadow-mem=size of KVM shadow MMU in bytes\n"
323
+ " split-wx=on|off (enable TCG split w^x mapping)\n"
324
" tb-size=n (TCG translation block cache size)\n"
325
" thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
326
SRST
327
@@ -XXX,XX +XXX,XX @@ SRST
328
``kvm-shadow-mem=size``
329
Defines the size of the KVM shadow MMU.
330
331
+ ``split-wx=on|off``
332
+ Controls the use of split w^x mapping for the TCG code generation
333
+ buffer. Some operating systems require this to be enabled, and in
334
+ such a case this will default on. On other operating systems, this
335
+ will default off, but one may enable this for testing or debugging.
336
+
337
``tb-size=n``
338
Controls the size (in MiB) of the TCG translation block cache.
339
340
--
341
2.25.1
342
343
Deleted patch
1
We cannot use a real temp file, because we would need to find
2
a filesystem that does not have noexec enabled. However, a
3
memfd is not associated with any filesystem.
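The mechanism is two mappings of the same anonymous memfd: a PROT_READ|PROT_WRITE view for the code generator and a PROT_READ|PROT_EXEC view for execution, with tcg_splitwx_diff recording the constant offset between the two. A standalone Linux-only sketch of the idea, roughly what qemu_memfd_alloc plus the second mmap below amount to (error handling trimmed, addresses chosen by the kernel):

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t size = 64 * 1024;

        /* Anonymous file: no filesystem, so no noexec mount to worry about. */
        int fd = memfd_create("jit-demo", 0);
        if (fd < 0 || ftruncate(fd, size) < 0) {
            perror("memfd");
            return 1;
        }

        /* Writable view for the generator... */
        void *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* ...and an executable view of the very same pages. */
        void *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
        close(fd);
        if (rw == MAP_FAILED || rx == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* The constant offset that tcg_splitwx_diff records. */
        ptrdiff_t diff = (intptr_t)rx - (intptr_t)rw;

        /* Bytes written through the RW view are visible through the RX view. */
        memcpy(rw, "\xc3", 1);   /* x86 RET, used purely as sample data */
        printf("diff=%td, rx[0]=0x%02x\n", diff, *(unsigned char *)rx);

        munmap(rw, size);
        munmap(rx, size);
        return 0;
    }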
4
1
5
Reviewed-by: Joelle van Dyne <j@getutm.app>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
accel/tcg/translate-all.c | 84 +++++++++++++++++++++++++++++++++++----
9
1 file changed, 76 insertions(+), 8 deletions(-)
10
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
14
+++ b/accel/tcg/translate-all.c
15
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
16
return true;
17
}
18
#else
19
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
20
+static bool alloc_code_gen_buffer_anon(size_t size, int prot,
21
+ int flags, Error **errp)
22
{
23
- int prot = PROT_WRITE | PROT_READ | PROT_EXEC;
24
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
25
void *buf;
26
27
- if (splitwx > 0) {
28
- error_setg(errp, "jit split-wx not supported");
29
- return false;
30
- }
31
-
32
buf = mmap(NULL, size, prot, flags, -1, 0);
33
if (buf == MAP_FAILED) {
34
error_setg_errno(errp, errno,
35
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
36
tcg_ctx->code_gen_buffer = buf;
37
return true;
38
}
39
+
40
+#ifdef CONFIG_POSIX
41
+#include "qemu/memfd.h"
42
+
43
+static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
44
+{
45
+ void *buf_rw, *buf_rx;
46
+ int fd = -1;
47
+
48
+ buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
49
+ if (buf_rw == NULL) {
50
+ return false;
51
+ }
52
+
53
+ buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
54
+ if (buf_rx == MAP_FAILED) {
55
+ error_setg_errno(errp, errno,
56
+ "failed to map shared memory for execute");
57
+ munmap(buf_rw, size);
58
+ close(fd);
59
+ return false;
60
+ }
61
+ close(fd);
62
+
63
+ tcg_ctx->code_gen_buffer = buf_rw;
64
+ tcg_ctx->code_gen_buffer_size = size;
65
+ tcg_splitwx_diff = buf_rx - buf_rw;
66
+
67
+ /* Request large pages for the buffer and the splitwx. */
68
+ qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
69
+ qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
70
+ return true;
71
+}
72
+#endif /* CONFIG_POSIX */
73
+
74
+static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
75
+{
76
+ if (TCG_TARGET_SUPPORT_MIRROR) {
77
+#ifdef CONFIG_POSIX
78
+ return alloc_code_gen_buffer_splitwx_memfd(size, errp);
79
+#endif
80
+ }
81
+ error_setg(errp, "jit split-wx not supported");
82
+ return false;
83
+}
84
+
85
+static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
86
+{
87
+ ERRP_GUARD();
88
+ int prot, flags;
89
+
90
+ if (splitwx) {
91
+ if (alloc_code_gen_buffer_splitwx(size, errp)) {
92
+ return true;
93
+ }
94
+ /*
95
+ * If splitwx force-on (1), fail;
96
+ * if splitwx default-on (-1), fall through to splitwx off.
97
+ */
98
+ if (splitwx > 0) {
99
+ return false;
100
+ }
101
+ error_free_or_abort(errp);
102
+ }
103
+
104
+ prot = PROT_READ | PROT_WRITE | PROT_EXEC;
105
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
106
+#ifdef CONFIG_TCG_INTERPRETER
107
+ /* The tcg interpreter does not need execute permission. */
108
+ prot = PROT_READ | PROT_WRITE;
109
+#endif
110
+
111
+ return alloc_code_gen_buffer_anon(size, prot, flags, errp);
112
+}
113
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
114
115
static bool tb_cmp(const void *ap, const void *bp)
116
--
117
2.25.1
118
119
Deleted patch
1
Cribbed from code posted by Joelle van Dyne <j@getutm.app>,
2
and rearranged to a cleaner structure.
3
1
4
Reviewed-by: Joelle van Dyne <j@getutm.app>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
accel/tcg/translate-all.c | 67 +++++++++++++++++++++++++++++++++++++++
8
1 file changed, 67 insertions(+)
9
10
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/translate-all.c
13
+++ b/accel/tcg/translate-all.c
14
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
15
}
16
#endif /* CONFIG_POSIX */
17
18
+#ifdef CONFIG_DARWIN
19
+#include <mach/mach.h>
20
+
21
+extern kern_return_t mach_vm_remap(vm_map_t target_task,
22
+ mach_vm_address_t *target_address,
23
+ mach_vm_size_t size,
24
+ mach_vm_offset_t mask,
25
+ int flags,
26
+ vm_map_t src_task,
27
+ mach_vm_address_t src_address,
28
+ boolean_t copy,
29
+ vm_prot_t *cur_protection,
30
+ vm_prot_t *max_protection,
31
+ vm_inherit_t inheritance);
32
+
33
+static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
34
+{
35
+ kern_return_t ret;
36
+ mach_vm_address_t buf_rw, buf_rx;
37
+ vm_prot_t cur_prot, max_prot;
38
+
39
+ /* Map the read-write portion via normal anon memory. */
40
+ if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
41
+ MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
42
+ return false;
43
+ }
44
+
45
+ buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
46
+ buf_rx = 0;
47
+ ret = mach_vm_remap(mach_task_self(),
48
+ &buf_rx,
49
+ size,
50
+ 0,
51
+ VM_FLAGS_ANYWHERE,
52
+ mach_task_self(),
53
+ buf_rw,
54
+ false,
55
+ &cur_prot,
56
+ &max_prot,
57
+ VM_INHERIT_NONE);
58
+ if (ret != KERN_SUCCESS) {
59
+ /* TODO: Convert "ret" to a human readable error message. */
60
+ error_setg(errp, "vm_remap for jit splitwx failed");
61
+ munmap((void *)buf_rw, size);
62
+ return false;
63
+ }
64
+
65
+ if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
66
+ error_setg_errno(errp, errno, "mprotect for jit splitwx");
67
+ munmap((void *)buf_rx, size);
68
+ munmap((void *)buf_rw, size);
69
+ return false;
70
+ }
71
+
72
+ tcg_splitwx_diff = buf_rx - buf_rw;
73
+ return true;
74
+}
75
+#endif /* CONFIG_DARWIN */
76
+
77
static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
78
{
79
if (TCG_TARGET_SUPPORT_MIRROR) {
80
+#ifdef CONFIG_DARWIN
81
+ return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
82
+#endif
83
#ifdef CONFIG_POSIX
84
return alloc_code_gen_buffer_splitwx_memfd(size, errp);
85
#endif
86
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
87
#ifdef CONFIG_TCG_INTERPRETER
88
/* The tcg interpreter does not need execute permission. */
89
prot = PROT_READ | PROT_WRITE;
90
+#elif defined(CONFIG_DARWIN)
91
+ /* Applicable to both iOS and macOS (Apple Silicon). */
92
+ if (!splitwx) {
93
+ flags |= MAP_JIT;
94
+ }
95
#endif
96
97
return alloc_code_gen_buffer_anon(size, prot, flags, errp);
98
--
99
2.25.1
100
101
Deleted patch
1
This produces a small pc-relative displacement within the
2
generated code to the TB structure that precedes it.
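Concretely, the value that comes back from tcg_qemu_tb_exec now carries two things: the exit index in its low bits (TB_EXIT_MASK) and, above that, the TB address as seen through the read-only (rx) alias, which cpu_tb_exec turns back into the writable alias via tcg_splitwx_to_rw. A small worked sketch of that unpacking, using made-up addresses in place of the real buffer layout:

    #include <stdint.h>
    #include <stdio.h>

    #define TB_EXIT_MASK 3   /* the low two bits carry the exit index, as in QEMU */

    int main(void)
    {
        /* Placeholder addresses: rw buffer at 0x100000, rx alias 0x40000 above it. */
        uintptr_t tb_rw = 0x100000;
        intptr_t splitwx_diff = 0x40000;
        uintptr_t tb_rx = tb_rw + splitwx_diff;

        /* What exit_tb now encodes: the rx-side TB pointer tagged with exit index 1. */
        uintptr_t ret = tb_rx | 1;

        int tb_exit = ret & TB_EXIT_MASK;
        uintptr_t last_tb_rx = ret & ~(uintptr_t)TB_EXIT_MASK;
        uintptr_t last_tb_rw = last_tb_rx - splitwx_diff;   /* tcg_splitwx_to_rw() */

        printf("exit=%d rx=%#lx rw=%#lx\n", tb_exit,
               (unsigned long)last_tb_rx, (unsigned long)last_tb_rw);
        return 0;
    }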
3
1
4
Reviewed-by: Joelle van Dyne <j@getutm.app>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
accel/tcg/cpu-exec.c | 35 +++++++++++++++++++++--------------
8
tcg/tcg-op.c | 13 ++++++++++++-
9
2 files changed, 33 insertions(+), 15 deletions(-)
10
11
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cpu-exec.c
14
+++ b/accel/tcg/cpu-exec.c
15
@@ -XXX,XX +XXX,XX @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
16
* TCG is not considered a security-sensitive part of QEMU so this does not
17
* affect the impact of CFI in environment with high security requirements
18
*/
19
-QEMU_DISABLE_CFI
20
-static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
21
+static inline TranslationBlock * QEMU_DISABLE_CFI
22
+cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
23
{
24
CPUArchState *env = cpu->env_ptr;
25
uintptr_t ret;
26
TranslationBlock *last_tb;
27
- int tb_exit;
28
const void *tb_ptr = itb->tc.ptr;
29
30
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
31
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
32
33
ret = tcg_qemu_tb_exec(env, tb_ptr);
34
cpu->can_do_io = 1;
35
- last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
36
- tb_exit = ret & TB_EXIT_MASK;
37
- trace_exec_tb_exit(last_tb, tb_exit);
38
+ /*
39
+ * TODO: Delay swapping back to the read-write region of the TB
40
+ * until we actually need to modify the TB. The read-only copy,
41
+ * coming from the rx region, shares the same host TLB entry as
42
+ * the code that executed the exit_tb opcode that arrived here.
43
+ * If we insist on touching both the RX and the RW pages, we
44
+ * double the host TLB pressure.
45
+ */
46
+ last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
47
+ *tb_exit = ret & TB_EXIT_MASK;
48
49
- if (tb_exit > TB_EXIT_IDX1) {
50
+ trace_exec_tb_exit(last_tb, *tb_exit);
51
+
52
+ if (*tb_exit > TB_EXIT_IDX1) {
53
/* We didn't start executing this TB (eg because the instruction
54
* counter hit zero); we must restore the guest PC to the address
55
* of the start of the TB.
56
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
57
cc->set_pc(cpu, last_tb->pc);
58
}
59
}
60
- return ret;
61
+ return last_tb;
62
}
63
64
#ifndef CONFIG_USER_ONLY
65
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
66
{
67
TranslationBlock *tb;
68
uint32_t cflags = curr_cflags() | CF_NOCACHE;
69
+ int tb_exit;
70
71
if (ignore_icount) {
72
cflags &= ~CF_USE_ICOUNT;
73
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
74
75
/* execute the generated code */
76
trace_exec_tb_nocache(tb, tb->pc);
77
- cpu_tb_exec(cpu, tb);
78
+ cpu_tb_exec(cpu, tb, &tb_exit);
79
80
mmap_lock();
81
tb_phys_invalidate(tb, -1);
82
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
83
uint32_t flags;
84
uint32_t cflags = 1;
85
uint32_t cf_mask = cflags & CF_HASH_MASK;
86
+ int tb_exit;
87
88
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
89
start_exclusive();
90
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
91
cpu_exec_enter(cpu);
92
/* execute the generated code */
93
trace_exec_tb(tb, pc);
94
- cpu_tb_exec(cpu, tb);
95
+ cpu_tb_exec(cpu, tb, &tb_exit);
96
cpu_exec_exit(cpu);
97
} else {
98
/*
99
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
100
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
101
TranslationBlock **last_tb, int *tb_exit)
102
{
103
- uintptr_t ret;
104
int32_t insns_left;
105
106
trace_exec_tb(tb, tb->pc);
107
- ret = cpu_tb_exec(cpu, tb);
108
- tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
109
- *tb_exit = ret & TB_EXIT_MASK;
110
+ tb = cpu_tb_exec(cpu, tb, tb_exit);
111
if (*tb_exit != TB_EXIT_REQUESTED) {
112
*last_tb = tb;
113
return;
114
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/tcg/tcg-op.c
117
+++ b/tcg/tcg-op.c
118
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
119
120
void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
121
{
122
- uintptr_t val = (uintptr_t)tb + idx;
123
+ /*
124
+ * Let the jit code return the read-only version of the
125
+ * TranslationBlock, so that we minimize the pc-relative
126
+ * distance of the address of the exit_tb code to TB.
127
+ * This will improve utilization of pc-relative address loads.
128
+ *
129
+ * TODO: Move this to translator_loop, so that all const
130
+ * TranslationBlock pointers refer to read-only memory.
131
+ * This requires coordination with targets that do not use
132
+ * the translator_loop.
133
+ */
134
+ uintptr_t val = (uintptr_t)tcg_splitwx_to_rx((void *)tb) + idx;
135
136
if (tb == NULL) {
137
tcg_debug_assert(idx == 0);
138
--
139
2.25.1
140
141
Deleted patch
1
Reviewed-by: Joelle van Dyne <j@getutm.app>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/i386/tcg-target.h | 2 +-
5
tcg/i386/tcg-target.c.inc | 20 +++++++++++---------
6
2 files changed, 12 insertions(+), 10 deletions(-)
7
1
8
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/i386/tcg-target.h
11
+++ b/tcg/i386/tcg-target.h
12
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
13
#define TCG_TARGET_NEED_LDST_LABELS
14
#endif
15
#define TCG_TARGET_NEED_POOL_LABELS
16
-#define TCG_TARGET_SUPPORT_MIRROR 0
17
+#define TCG_TARGET_SUPPORT_MIRROR 1
18
19
#endif
20
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/i386/tcg-target.c.inc
23
+++ b/tcg/i386/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static bool have_lzcnt;
25
# define have_lzcnt 0
26
#endif
27
28
-static tcg_insn_unit *tb_ret_addr;
29
+static const tcg_insn_unit *tb_ret_addr;
30
31
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
32
intptr_t value, intptr_t addend)
33
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
34
value += addend;
35
switch(type) {
36
case R_386_PC32:
37
- value -= (uintptr_t)code_ptr;
38
+ value -= (uintptr_t)tcg_splitwx_to_rx(code_ptr);
39
if (value != (int32_t)value) {
40
return false;
41
}
42
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
43
tcg_patch32(code_ptr, value);
44
break;
45
case R_386_PC8:
46
- value -= (uintptr_t)code_ptr;
47
+ value -= (uintptr_t)tcg_splitwx_to_rx(code_ptr);
48
if (value != (int8_t)value) {
49
return false;
50
}
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type,
52
}
53
54
/* Try a 7 byte pc-relative lea before the 10 byte movq. */
55
- diff = arg - ((uintptr_t)s->code_ptr + 7);
56
+ diff = tcg_pcrel_diff(s, (const void *)arg) - 7;
57
if (diff == (int32_t)diff) {
58
tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
59
tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);
60
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
61
tcg_out_branch(s, 1, dest);
62
}
63
64
-static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
65
+static void tcg_out_jmp(TCGContext *s, const tcg_insn_unit *dest)
66
{
67
tcg_out_branch(s, 0, dest);
68
}
69
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
70
label->datahi_reg = datahi;
71
label->addrlo_reg = addrlo;
72
label->addrhi_reg = addrhi;
73
- label->raddr = raddr;
74
+ /* TODO: Cast goes away when all hosts converted */
75
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
76
label->label_ptr[0] = label_ptr[0];
77
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
78
label->label_ptr[1] = label_ptr[1];
79
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
80
/* jump displacement must be aligned for atomic patching;
81
* see if we need to add extra nops before jump
82
*/
83
- gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
84
+ gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
85
if (gap != 1) {
86
tcg_out_nopn(s, gap - 1);
87
}
88
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
89
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
90
* and fall through to the rest of the epilogue.
91
*/
92
- tcg_code_gen_epilogue = s->code_ptr;
93
+ /* TODO: Cast goes away when all hosts converted */
94
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
95
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);
96
97
/* TB epilogue */
98
- tb_ret_addr = s->code_ptr;
99
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
100
101
tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);
102
103
--
104
2.25.1
105
106
Deleted patch
1
A typo generated a branch-and-link insn instead of plain branch.
2
1
3
Reviewed-by: Joelle van Dyne <j@getutm.app>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/aarch64/tcg-target.c.inc | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/aarch64/tcg-target.c.inc
12
+++ b/tcg/aarch64/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_goto_long(TCGContext *s, tcg_insn_unit *target)
14
{
15
ptrdiff_t offset = target - s->code_ptr;
16
if (offset == sextract64(offset, 0, 26)) {
17
- tcg_out_insn(s, 3206, BL, offset);
18
+ tcg_out_insn(s, 3206, B, offset);
19
} else {
20
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
21
tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
22
--
23
2.25.1
24
25
1
Reviewed-by: Joelle van Dyne <j@getutm.app>
1
DisasContextBase.pc_next has type vaddr; use the correct log format.
2
3
Fixes: 85c19af63e7 ("include/exec: Use vaddr in DisasContextBase for virtual addresses")
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/aarch64/tcg-target.h | 2 +-
6
target/mips/tcg/octeon_translate.c | 4 ++--
5
tcg/aarch64/tcg-target.c.inc | 57 ++++++++++++++++++++----------------
7
1 file changed, 2 insertions(+), 2 deletions(-)
6
2 files changed, 33 insertions(+), 26 deletions(-)
7
8
8
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
9
diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/aarch64/tcg-target.h
11
--- a/target/mips/tcg/octeon_translate.c
11
+++ b/tcg/aarch64/tcg-target.h
12
+++ b/target/mips/tcg/octeon_translate.c
12
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
13
@@ -XXX,XX +XXX,XX @@ static bool trans_BBIT(DisasContext *ctx, arg_BBIT *a)
13
#define TCG_TARGET_NEED_LDST_LABELS
14
TCGv p;
14
#endif
15
15
#define TCG_TARGET_NEED_POOL_LABELS
16
if (ctx->hflags & MIPS_HFLAG_BMASK) {
16
-#define TCG_TARGET_SUPPORT_MIRROR 0
17
- LOG_DISAS("Branch in delay / forbidden slot at PC 0x"
17
+#define TCG_TARGET_SUPPORT_MIRROR 1
18
- TARGET_FMT_lx "\n", ctx->base.pc_next);
18
19
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x%" VADDR_PRIx "\n",
19
#endif /* AARCH64_TCG_TARGET_H */
20
+ ctx->base.pc_next);
20
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
21
generate_exception_end(ctx, EXCP_RI);
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/aarch64/tcg-target.c.inc
23
+++ b/tcg/aarch64/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[1] = {
25
#define TCG_REG_GUEST_BASE TCG_REG_X28
26
#endif
27
28
-static inline bool reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
29
+static bool reloc_pc26(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
30
{
31
- ptrdiff_t offset = target - code_ptr;
32
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
33
+ ptrdiff_t offset = target - src_rx;
34
+
35
if (offset == sextract64(offset, 0, 26)) {
36
/* read instruction, mask away previous PC_REL26 parameter contents,
37
set the proper offset, then write back the instruction. */
38
- *code_ptr = deposit32(*code_ptr, 0, 26, offset);
39
+ *src_rw = deposit32(*src_rw, 0, 26, offset);
40
return true;
22
return true;
41
}
23
}
42
return false;
43
}
44
45
-static inline bool reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
46
+static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
47
{
48
- ptrdiff_t offset = target - code_ptr;
49
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
50
+ ptrdiff_t offset = target - src_rx;
51
+
52
if (offset == sextract64(offset, 0, 19)) {
53
- *code_ptr = deposit32(*code_ptr, 5, 19, offset);
54
+ *src_rw = deposit32(*src_rw, 5, 19, offset);
55
return true;
56
}
57
return false;
58
}
59
60
-static inline bool patch_reloc(tcg_insn_unit *code_ptr, int type,
61
- intptr_t value, intptr_t addend)
62
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
63
+ intptr_t value, intptr_t addend)
64
{
65
tcg_debug_assert(addend == 0);
66
switch (type) {
67
case R_AARCH64_JUMP26:
68
case R_AARCH64_CALL26:
69
- return reloc_pc26(code_ptr, (tcg_insn_unit *)value);
70
+ return reloc_pc26(code_ptr, (const tcg_insn_unit *)value);
71
case R_AARCH64_CONDBR19:
72
- return reloc_pc19(code_ptr, (tcg_insn_unit *)value);
73
+ return reloc_pc19(code_ptr, (const tcg_insn_unit *)value);
74
default:
75
g_assert_not_reached();
76
}
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
78
/* Look for host pointer values within 4G of the PC. This happens
79
often when loading pointers to QEMU's own data structures. */
80
if (type == TCG_TYPE_I64) {
81
- tcg_target_long disp = value - (intptr_t)s->code_ptr;
82
+ intptr_t src_rx = (intptr_t)tcg_splitwx_to_rx(s->code_ptr);
83
+ tcg_target_long disp = value - src_rx;
84
if (disp == sextract64(disp, 0, 21)) {
85
tcg_out_insn(s, 3406, ADR, rd, disp);
86
return;
87
}
88
- disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12);
89
+ disp = (value >> 12) - (src_rx >> 12);
90
if (disp == sextract64(disp, 0, 21)) {
91
tcg_out_insn(s, 3406, ADRP, rd, disp);
92
if (value & 0xfff) {
93
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
94
95
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
96
{
97
- ptrdiff_t offset = target - s->code_ptr;
98
+ ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
99
tcg_debug_assert(offset == sextract64(offset, 0, 26));
100
tcg_out_insn(s, 3206, B, offset);
101
}
102
103
-static inline void tcg_out_goto_long(TCGContext *s, tcg_insn_unit *target)
104
+static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
105
{
106
- ptrdiff_t offset = target - s->code_ptr;
107
+ ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
108
if (offset == sextract64(offset, 0, 26)) {
109
tcg_out_insn(s, 3206, B, offset);
110
} else {
111
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
112
tcg_out_insn(s, 3207, BLR, reg);
113
}
114
115
-static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
116
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
117
{
118
- ptrdiff_t offset = target - s->code_ptr;
119
+ ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
120
if (offset == sextract64(offset, 0, 26)) {
121
tcg_out_insn(s, 3206, BL, offset);
122
} else {
123
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
124
tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
125
offset = tcg_in32(s) >> 5;
126
} else {
127
- offset = l->u.value_ptr - s->code_ptr;
128
+ offset = tcg_pcrel_diff(s, l->u.value_ptr) >> 2;
129
tcg_debug_assert(offset == sextract64(offset, 0, 19));
130
}
131
132
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[16] = {
133
[MO_BEQ] = helper_be_stq_mmu,
134
};
135
136
-static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
137
+static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
138
{
139
ptrdiff_t offset = tcg_pcrel_diff(s, target);
140
tcg_debug_assert(offset == sextract64(offset, 0, 21));
141
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
142
MemOp opc = get_memop(oi);
143
MemOp size = opc & MO_SIZE;
144
145
- if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
146
+ if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
147
return false;
148
}
149
150
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
151
MemOp opc = get_memop(oi);
152
MemOp size = opc & MO_SIZE;
153
154
- if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
155
+ if (!reloc_pc19(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
156
return false;
157
}
158
159
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
160
label->type = ext;
161
label->datalo_reg = data_reg;
162
label->addrlo_reg = addr_reg;
163
- label->raddr = raddr;
164
+ /* TODO: Cast goes away when all hosts converted */
165
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
166
label->label_ptr[0] = label_ptr;
167
}
168
169
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
170
#endif /* CONFIG_SOFTMMU */
171
}
172
173
-static tcg_insn_unit *tb_ret_addr;
174
+static const tcg_insn_unit *tb_ret_addr;
175
176
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
177
const TCGArg args[TCG_MAX_OP_ARGS],
178
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
179
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
180
* and fall through to the rest of the epilogue.
181
*/
182
- tcg_code_gen_epilogue = s->code_ptr;
183
+ /* TODO: Cast goes away when all hosts converted */
184
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
185
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
186
187
/* TB epilogue */
188
- tb_ret_addr = s->code_ptr;
189
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
190
191
/* Remove TCG locals stack space. */
192
tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
193
--
24
--
194
2.25.1
25
2.43.0
195
196
Deleted patch
1
Reviewed-by: Joelle van Dyne <j@getutm.app>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/disas/dis-asm.h | 4 ++--
6
disas.c | 4 +---
7
disas/capstone.c | 2 +-
8
3 files changed, 4 insertions(+), 6 deletions(-)
9
1
10
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/disas/dis-asm.h
13
+++ b/include/disas/dis-asm.h
14
@@ -XXX,XX +XXX,XX @@ typedef struct disassemble_info {
15
(bfd_vma addr, struct disassemble_info * info);
16
17
/* These are for buffer_read_memory. */
18
- bfd_byte *buffer;
19
+ const bfd_byte *buffer;
20
bfd_vma buffer_vma;
21
int buffer_length;
22
23
@@ -XXX,XX +XXX,XX @@ int print_insn_rx(bfd_vma, disassemble_info *);
24
25
#ifdef CONFIG_CAPSTONE
26
bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size);
27
-bool cap_disas_host(disassemble_info *info, void *code, size_t size);
28
+bool cap_disas_host(disassemble_info *info, const void *code, size_t size);
29
bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count);
30
bool cap_disas_plugin(disassemble_info *info, uint64_t pc, size_t size);
31
#else
32
diff --git a/disas.c b/disas.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/disas.c
35
+++ b/disas.c
36
@@ -XXX,XX +XXX,XX @@ char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size)
37
}
38
39
/* Disassemble this for me please... (debugging). */
40
-void disas(FILE *out, const void *ccode, unsigned long size)
41
+void disas(FILE *out, const void *code, unsigned long size)
42
{
43
- /* TODO: Push constness through the disas backends. */
44
- void *code = (void *)ccode;
45
uintptr_t pc;
46
int count;
47
CPUDebug s;
48
diff --git a/disas/capstone.c b/disas/capstone.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/disas/capstone.c
51
+++ b/disas/capstone.c
52
@@ -XXX,XX +XXX,XX @@ bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size)
53
}
54
55
/* Disassemble SIZE bytes at CODE for the host. */
56
-bool cap_disas_host(disassemble_info *info, void *code, size_t size)
57
+bool cap_disas_host(disassemble_info *info, const void *code, size_t size)
58
{
59
csh handle;
60
const uint8_t *cbuf;
61
--
2.25.1
Deleted patch
1
Reviewed-by: Joelle van Dyne <j@getutm.app>
2
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/tci.c | 60 +++++++++++++++++++++++++++++++------------------------
6
1 file changed, 34 insertions(+), 26 deletions(-)
7
1
8
diff --git a/tcg/tci.c b/tcg/tci.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
11
+++ b/tcg/tci.c
12
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_uint64(uint32_t high, uint32_t low)
13
#endif
14
15
/* Read constant (native size) from bytecode. */
16
-static tcg_target_ulong tci_read_i(uint8_t **tb_ptr)
17
+static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
18
{
19
- tcg_target_ulong value = *(tcg_target_ulong *)(*tb_ptr);
20
+ tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
21
*tb_ptr += sizeof(value);
22
return value;
23
}
24
25
/* Read unsigned constant (32 bit) from bytecode. */
26
-static uint32_t tci_read_i32(uint8_t **tb_ptr)
27
+static uint32_t tci_read_i32(const uint8_t **tb_ptr)
28
{
29
- uint32_t value = *(uint32_t *)(*tb_ptr);
30
+ uint32_t value = *(const uint32_t *)(*tb_ptr);
31
*tb_ptr += sizeof(value);
32
return value;
33
}
34
35
/* Read signed constant (32 bit) from bytecode. */
36
-static int32_t tci_read_s32(uint8_t **tb_ptr)
37
+static int32_t tci_read_s32(const uint8_t **tb_ptr)
38
{
39
- int32_t value = *(int32_t *)(*tb_ptr);
40
+ int32_t value = *(const int32_t *)(*tb_ptr);
41
*tb_ptr += sizeof(value);
42
return value;
43
}
44
45
#if TCG_TARGET_REG_BITS == 64
46
/* Read constant (64 bit) from bytecode. */
47
-static uint64_t tci_read_i64(uint8_t **tb_ptr)
48
+static uint64_t tci_read_i64(const uint8_t **tb_ptr)
49
{
50
- uint64_t value = *(uint64_t *)(*tb_ptr);
51
+ uint64_t value = *(const uint64_t *)(*tb_ptr);
52
*tb_ptr += sizeof(value);
53
return value;
54
}
55
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_i64(uint8_t **tb_ptr)
56
57
/* Read indexed register (native size) from bytecode. */
58
static tcg_target_ulong
59
-tci_read_r(const tcg_target_ulong *regs, uint8_t **tb_ptr)
60
+tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
61
{
62
tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
63
*tb_ptr += 1;
64
@@ -XXX,XX +XXX,XX @@ tci_read_r(const tcg_target_ulong *regs, uint8_t **tb_ptr)
65
}
66
67
/* Read indexed register (8 bit) from bytecode. */
68
-static uint8_t tci_read_r8(const tcg_target_ulong *regs, uint8_t **tb_ptr)
69
+static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
70
{
71
uint8_t value = tci_read_reg8(regs, **tb_ptr);
72
*tb_ptr += 1;
73
@@ -XXX,XX +XXX,XX @@ static uint8_t tci_read_r8(const tcg_target_ulong *regs, uint8_t **tb_ptr)
74
75
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
76
/* Read indexed register (8 bit signed) from bytecode. */
77
-static int8_t tci_read_r8s(const tcg_target_ulong *regs, uint8_t **tb_ptr)
78
+static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
79
{
80
int8_t value = tci_read_reg8s(regs, **tb_ptr);
81
*tb_ptr += 1;
82
@@ -XXX,XX +XXX,XX @@ static int8_t tci_read_r8s(const tcg_target_ulong *regs, uint8_t **tb_ptr)
83
#endif
84
85
/* Read indexed register (16 bit) from bytecode. */
86
-static uint16_t tci_read_r16(const tcg_target_ulong *regs, uint8_t **tb_ptr)
87
+static uint16_t tci_read_r16(const tcg_target_ulong *regs,
88
+ const uint8_t **tb_ptr)
89
{
90
uint16_t value = tci_read_reg16(regs, **tb_ptr);
91
*tb_ptr += 1;
92
@@ -XXX,XX +XXX,XX @@ static uint16_t tci_read_r16(const tcg_target_ulong *regs, uint8_t **tb_ptr)
93
94
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
95
/* Read indexed register (16 bit signed) from bytecode. */
96
-static int16_t tci_read_r16s(const tcg_target_ulong *regs, uint8_t **tb_ptr)
97
+static int16_t tci_read_r16s(const tcg_target_ulong *regs,
98
+ const uint8_t **tb_ptr)
99
{
100
int16_t value = tci_read_reg16s(regs, **tb_ptr);
101
*tb_ptr += 1;
102
@@ -XXX,XX +XXX,XX @@ static int16_t tci_read_r16s(const tcg_target_ulong *regs, uint8_t **tb_ptr)
103
#endif
104
105
/* Read indexed register (32 bit) from bytecode. */
106
-static uint32_t tci_read_r32(const tcg_target_ulong *regs, uint8_t **tb_ptr)
107
+static uint32_t tci_read_r32(const tcg_target_ulong *regs,
108
+ const uint8_t **tb_ptr)
109
{
110
uint32_t value = tci_read_reg32(regs, **tb_ptr);
111
*tb_ptr += 1;
112
@@ -XXX,XX +XXX,XX @@ static uint32_t tci_read_r32(const tcg_target_ulong *regs, uint8_t **tb_ptr)
113
114
#if TCG_TARGET_REG_BITS == 32
115
/* Read two indexed registers (2 * 32 bit) from bytecode. */
116
-static uint64_t tci_read_r64(const tcg_target_ulong *regs, uint8_t **tb_ptr)
117
+static uint64_t tci_read_r64(const tcg_target_ulong *regs,
118
+ const uint8_t **tb_ptr)
119
{
120
uint32_t low = tci_read_r32(regs, tb_ptr);
121
return tci_uint64(tci_read_r32(regs, tb_ptr), low);
122
}
123
#elif TCG_TARGET_REG_BITS == 64
124
/* Read indexed register (32 bit signed) from bytecode. */
125
-static int32_t tci_read_r32s(const tcg_target_ulong *regs, uint8_t **tb_ptr)
126
+static int32_t tci_read_r32s(const tcg_target_ulong *regs,
127
+ const uint8_t **tb_ptr)
128
{
129
int32_t value = tci_read_reg32s(regs, **tb_ptr);
130
*tb_ptr += 1;
131
@@ -XXX,XX +XXX,XX @@ static int32_t tci_read_r32s(const tcg_target_ulong *regs, uint8_t **tb_ptr)
132
}
133
134
/* Read indexed register (64 bit) from bytecode. */
135
-static uint64_t tci_read_r64(const tcg_target_ulong *regs, uint8_t **tb_ptr)
136
+static uint64_t tci_read_r64(const tcg_target_ulong *regs,
137
+ const uint8_t **tb_ptr)
138
{
139
uint64_t value = tci_read_reg64(regs, **tb_ptr);
140
*tb_ptr += 1;
141
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_r64(const tcg_target_ulong *regs, uint8_t **tb_ptr)
142
143
/* Read indexed register(s) with target address from bytecode. */
144
static target_ulong
145
-tci_read_ulong(const tcg_target_ulong *regs, uint8_t **tb_ptr)
146
+tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
147
{
148
target_ulong taddr = tci_read_r(regs, tb_ptr);
149
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
150
@@ -XXX,XX +XXX,XX @@ tci_read_ulong(const tcg_target_ulong *regs, uint8_t **tb_ptr)
151
152
/* Read indexed register or constant (native size) from bytecode. */
153
static tcg_target_ulong
154
-tci_read_ri(const tcg_target_ulong *regs, uint8_t **tb_ptr)
155
+tci_read_ri(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
156
{
157
tcg_target_ulong value;
158
TCGReg r = **tb_ptr;
159
@@ -XXX,XX +XXX,XX @@ tci_read_ri(const tcg_target_ulong *regs, uint8_t **tb_ptr)
160
}
161
162
/* Read indexed register or constant (32 bit) from bytecode. */
163
-static uint32_t tci_read_ri32(const tcg_target_ulong *regs, uint8_t **tb_ptr)
164
+static uint32_t tci_read_ri32(const tcg_target_ulong *regs,
165
+ const uint8_t **tb_ptr)
166
{
167
uint32_t value;
168
TCGReg r = **tb_ptr;
169
@@ -XXX,XX +XXX,XX @@ static uint32_t tci_read_ri32(const tcg_target_ulong *regs, uint8_t **tb_ptr)
170
171
#if TCG_TARGET_REG_BITS == 32
172
/* Read two indexed registers or constants (2 * 32 bit) from bytecode. */
173
-static uint64_t tci_read_ri64(const tcg_target_ulong *regs, uint8_t **tb_ptr)
174
+static uint64_t tci_read_ri64(const tcg_target_ulong *regs,
175
+ const uint8_t **tb_ptr)
176
{
177
uint32_t low = tci_read_ri32(regs, tb_ptr);
178
return tci_uint64(tci_read_ri32(regs, tb_ptr), low);
179
}
180
#elif TCG_TARGET_REG_BITS == 64
181
/* Read indexed register or constant (64 bit) from bytecode. */
182
-static uint64_t tci_read_ri64(const tcg_target_ulong *regs, uint8_t **tb_ptr)
183
+static uint64_t tci_read_ri64(const tcg_target_ulong *regs,
184
+ const uint8_t **tb_ptr)
185
{
186
uint64_t value;
187
TCGReg r = **tb_ptr;
188
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_read_ri64(const tcg_target_ulong *regs, uint8_t **tb_ptr)
189
}
190
#endif
191
192
-static tcg_target_ulong tci_read_label(uint8_t **tb_ptr)
193
+static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
194
{
195
tcg_target_ulong label = tci_read_i(tb_ptr);
196
tci_assert(label != 0);
197
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
198
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
199
const void *v_tb_ptr)
200
{
201
- /* TODO: Propagate const through this file. */
202
- uint8_t *tb_ptr = (uint8_t *)v_tb_ptr;
203
+ const uint8_t *tb_ptr = v_tb_ptr;
204
tcg_target_ulong regs[TCG_TARGET_NB_REGS];
205
long tcg_temps[CPU_TEMP_BUF_NLONGS];
206
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
207
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
208
TCGOpcode opc = tb_ptr[0];
209
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
210
uint8_t op_size = tb_ptr[1];
211
- uint8_t *old_code_ptr = tb_ptr;
212
+ const uint8_t *old_code_ptr = tb_ptr;
213
#endif
214
tcg_target_ulong t0;
215
tcg_target_ulong t1;
216
--
2.25.1
Deleted patch
1
Reviewed-by: Joelle van Dyne <j@getutm.app>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/tcg/tcg.h | 13 +++++++++++++
5
1 file changed, 13 insertions(+)
6
1
7
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
8
index XXXXXXX..XXXXXXX 100644
9
--- a/include/tcg/tcg.h
10
+++ b/include/tcg/tcg.h
11
@@ -XXX,XX +XXX,XX @@ static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, const void *target)
12
return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_ptr));
13
}
14
15
+/**
16
+ * tcg_tbrel_diff
17
+ * @s: the tcg context
18
+ * @target: address of the target
19
+ *
20
+ * Produce a difference, from the beginning of the current TB code
21
+ * to the destination address.
22
+ */
23
+static inline ptrdiff_t tcg_tbrel_diff(TCGContext *s, const void *target)
24
+{
25
+ return tcg_ptr_byte_diff(target, tcg_splitwx_to_rx(s->code_buf));
26
+}
27
+
28
/**
29
* tcg_current_code_size
30
* @s: the tcg context
31
--
2.25.1
Deleted patch
Use tcg_tbrel_diff when we need a displacement to a label,
and with a NULL argument when we need the normalizing addend.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
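
As an illustration of the NULL case (a sketch only, not part of the change;
the helper names below are stand-ins for tcg_ptr_byte_diff/tcg_tbrel_diff):

    #include <stddef.h>
    #include <stdint.h>

    /* Byte-wise pointer subtraction, as tcg_ptr_byte_diff() does. */
    static ptrdiff_t ptr_byte_diff(const void *a, const void *b)
    {
        return (const char *)a - (const char *)b;
    }

    /* tcg_tbrel_diff() with the rx view of code_buf spelled out. */
    static ptrdiff_t tbrel_diff(const void *rx_code_buf, const void *target)
    {
        return ptr_byte_diff(target, rx_code_buf);
    }

    /* tbrel_diff(rx_code_buf, NULL) == -(intptr_t)rx_code_buf, i.e. the
       split-wx counterpart of the old -(intptr_t)s->code_gen_ptr addend. */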
5
---
6
tcg/ppc/tcg-target.c.inc | 6 +++---
7
1 file changed, 3 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/ppc/tcg-target.c.inc
12
+++ b/tcg/ppc/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
14
}
15
16
/* Load addresses within the TB with one insn. */
17
- tb_diff = arg - (intptr_t)s->code_gen_ptr;
18
+ tb_diff = tcg_tbrel_diff(s, (void *)arg);
19
if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
20
tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
21
return;
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
23
/* Use the constant pool, if possible. */
24
if (!in_prologue && USE_REG_TB) {
25
new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
26
- -(intptr_t)s->code_gen_ptr);
27
+ tcg_tbrel_diff(s, NULL));
28
tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
29
return;
30
}
31
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
32
*/
33
if (USE_REG_TB) {
34
rel = R_PPC_ADDR16;
35
- add = -(intptr_t)s->code_gen_ptr;
36
+ add = tcg_tbrel_diff(s, NULL);
37
} else {
38
rel = R_PPC_ADDR32;
39
add = 0;
40
--
2.25.1
Deleted patch
The maximum TB code gen size is UINT16_MAX, which the current
code does not support. Use our utility function to optimally
add an arbitrary constant.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
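
To make the limit concrete (a throwaway demonstration, not part of the
change): once a TB's code grows past 32KiB, the negative displacement no
longer survives the int16_t round-trip that the old assert depended on.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* e.g. c = -tcg_current_code_size(s) for a 40000-byte TB */
        int32_t c = -40000;
        printf("fits in signed 16 bits: %s\n", c == (int16_t)c ? "yes" : "no");
        return 0;   /* prints "no" */
    }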
6
---
7
tcg/ppc/tcg-target.c.inc | 5 ++---
8
1 file changed, 2 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.c.inc
13
+++ b/tcg/ppc/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
15
set_jmp_reset_offset(s, args[0]);
16
if (USE_REG_TB) {
17
/* For the unlinked case, need to reset TCG_REG_TB. */
18
- c = -tcg_current_code_size(s);
19
- assert(c == (int16_t)c);
20
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, c));
21
+ tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
22
+ -tcg_current_code_size(s));
23
}
24
break;
25
case INDEX_op_goto_ptr:
26
--
2.25.1
Deleted patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
3
tcg/ppc/tcg-target.h | 2 +-
4
tcg/ppc/tcg-target.c.inc | 53 +++++++++++++++++++++++-----------------
5
2 files changed, 31 insertions(+), 24 deletions(-)
6
1
7
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/ppc/tcg-target.h
10
+++ b/tcg/ppc/tcg-target.h
11
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
12
#define TCG_TARGET_NEED_LDST_LABELS
13
#endif
14
#define TCG_TARGET_NEED_POOL_LABELS
15
-#define TCG_TARGET_SUPPORT_MIRROR 0
16
+#define TCG_TARGET_SUPPORT_MIRROR 1
17
18
#endif
19
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/ppc/tcg-target.c.inc
22
+++ b/tcg/ppc/tcg-target.c.inc
23
@@ -XXX,XX +XXX,XX @@
24
#define TCG_CT_CONST_MONE 0x2000
25
#define TCG_CT_CONST_WSZ 0x4000
26
27
-static tcg_insn_unit *tb_ret_addr;
28
-
29
TCGPowerISA have_isa;
30
static bool have_isel;
31
bool have_altivec;
32
@@ -XXX,XX +XXX,XX @@ static inline bool in_range_b(tcg_target_long target)
33
return target == sextract64(target, 0, 26);
34
}
35
36
-static uint32_t reloc_pc24_val(tcg_insn_unit *pc, const tcg_insn_unit *target)
37
+static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
38
+             const tcg_insn_unit *target)
39
{
40
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
41
tcg_debug_assert(in_range_b(disp));
42
return disp & 0x3fffffc;
43
}
44
45
-static bool reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
46
+static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
47
{
48
- ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
49
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
50
+ ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
51
+
52
if (in_range_b(disp)) {
53
- *pc = (*pc & ~0x3fffffc) | (disp & 0x3fffffc);
54
+ *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
55
return true;
56
}
57
return false;
58
}
59
60
-static uint16_t reloc_pc14_val(tcg_insn_unit *pc, const tcg_insn_unit *target)
61
+static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
62
+             const tcg_insn_unit *target)
63
{
64
ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
65
tcg_debug_assert(disp == (int16_t) disp);
66
return disp & 0xfffc;
67
}
68
69
-static bool reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
70
+static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
71
{
72
- ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
73
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
74
+ ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
75
+
76
if (disp == (int16_t) disp) {
77
- *pc = (*pc & ~0xfffc) | (disp & 0xfffc);
78
+ *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
79
return true;
80
}
81
return false;
82
@@ -XXX,XX +XXX,XX @@ static const uint32_t tcg_to_isel[] = {
83
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
84
intptr_t value, intptr_t addend)
85
{
86
- tcg_insn_unit *target;
87
+ const tcg_insn_unit *target;
88
int16_t lo;
89
int32_t hi;
90
91
value += addend;
92
- target = (tcg_insn_unit *)value;
93
+ target = (const tcg_insn_unit *)value;
94
95
switch (type) {
96
case R_PPC_REL14:
97
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
98
static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
99
{
100
if (l->has_value) {
101
- bc |= reloc_pc14_val(s->code_ptr, l->u.value_ptr);
102
+ bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
103
} else {
104
tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
105
}
106
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
107
label->datahi_reg = datahi_reg;
108
label->addrlo_reg = addrlo_reg;
109
label->addrhi_reg = addrhi_reg;
110
- label->raddr = raddr;
111
+ /* TODO: Cast goes away when all hosts converted */
112
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
113
label->label_ptr[0] = lptr;
114
}
115
116
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
117
MemOp opc = get_memop(oi);
118
TCGReg hi, lo, arg = TCG_REG_R3;
119
120
- if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
121
+ if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
122
return false;
123
}
124
125
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
126
MemOp s_bits = opc & MO_SIZE;
127
TCGReg hi, lo, arg = TCG_REG_R3;
128
129
- if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
130
+ if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
131
return false;
132
}
133
134
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
135
int i;
136
137
#ifdef _CALL_AIX
138
- void **desc = (void **)s->code_ptr;
139
- desc[0] = desc + 2; /* entry point */
140
- desc[1] = 0; /* environment pointer */
141
- s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
142
+ const void **desc = (const void **)s->code_ptr;
143
+ desc[0] = tcg_splitwx_to_rx(desc + 2); /* entry point */
144
+ desc[1] = 0; /* environment pointer */
145
+ s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
146
#endif
147
148
tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
149
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
150
tcg_out32(s, BCCTR | BO_ALWAYS);
151
152
/* Epilogue */
153
- tcg_code_gen_epilogue = tb_ret_addr = s->code_ptr;
154
+ /* TODO: Cast goes away when all hosts converted */
155
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
156
157
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
158
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
159
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
160
switch (opc) {
161
case INDEX_op_exit_tb:
162
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
163
- tcg_out_b(s, 0, tb_ret_addr);
164
+ tcg_out_b(s, 0, tcg_code_gen_epilogue);
165
break;
166
case INDEX_op_goto_tb:
167
if (s->tb_jmp_insn_offset) {
168
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
169
uint32_t insn = B;
170
171
if (l->has_value) {
172
- insn |= reloc_pc24_val(s->code_ptr, l->u.value_ptr);
173
+ insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
174
+ l->u.value_ptr);
175
} else {
176
tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
177
}
178
--
2.25.1
Deleted patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
3
tcg/sparc/tcg-target.c.inc | 15 +++++++--------
4
1 file changed, 7 insertions(+), 8 deletions(-)
5
1
6
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
7
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/sparc/tcg-target.c.inc
9
+++ b/tcg/sparc/tcg-target.c.inc
10
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
11
12
/* A 13-bit constant relative to the TB. */
13
if (!in_prologue && USE_REG_TB) {
14
- test = arg - (uintptr_t)s->code_gen_ptr;
15
+ test = tcg_tbrel_diff(s, (void *)arg);
16
if (check_fit_ptr(test, 13)) {
17
tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
18
return;
19
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
20
return false;
21
}
22
23
-static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
24
+static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
25
{
26
- intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
27
+ intptr_t diff = tcg_tbrel_diff(s, arg);
28
if (USE_REG_TB && check_fit_ptr(diff, 13)) {
29
tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
30
return;
31
}
32
- tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
33
- tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
34
+ tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
35
+ tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
36
}
37
38
static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
40
tcg_out_movi_imm13(s, TCG_REG_O0, a0);
41
break;
42
} else if (USE_REG_TB) {
43
- intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr;
44
+ intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
45
if (check_fit_ptr(tb_diff, 13)) {
46
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
47
/* Note that TCG_REG_TB has been unwound to O1. */
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
49
}
50
} else {
51
/* indirect jump method */
52
- tcg_out_ld_ptr(s, TCG_REG_TB,
53
- (uintptr_t)(s->tb_jmp_target_addr + a0));
54
+ tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
55
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
56
tcg_out_nop(s);
57
}
58
--
2.25.1
Deleted patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
3
tcg/sparc/tcg-target.h | 2 +-
4
tcg/sparc/tcg-target.c.inc | 24 +++++++++++++-----------
5
2 files changed, 14 insertions(+), 12 deletions(-)
6
1
7
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/sparc/tcg-target.h
10
+++ b/tcg/sparc/tcg-target.h
11
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
12
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
13
14
#define TCG_TARGET_NEED_POOL_LABELS
15
-#define TCG_TARGET_SUPPORT_MIRROR 0
16
+#define TCG_TARGET_SUPPORT_MIRROR 1
17
18
#endif
19
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/sparc/tcg-target.c.inc
22
+++ b/tcg/sparc/tcg-target.c.inc
23
@@ -XXX,XX +XXX,XX @@ static inline int check_fit_i32(int32_t val, unsigned int bits)
24
# define check_fit_ptr check_fit_i32
25
#endif
26
27
-static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
28
+static bool patch_reloc(tcg_insn_unit *src_rw, int type,
29
intptr_t value, intptr_t addend)
30
{
31
- uint32_t insn = *code_ptr;
32
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
33
+ uint32_t insn = *src_rw;
34
intptr_t pcrel;
35
36
value += addend;
37
- pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);
38
+ pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);
39
40
switch (type) {
41
case R_SPARC_WDISP16:
42
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
43
g_assert_not_reached();
44
}
45
46
- *code_ptr = insn;
47
+ *src_rw = insn;
48
return true;
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
52
}
53
54
#ifdef CONFIG_SOFTMMU
55
-static tcg_insn_unit *qemu_ld_trampoline[16];
56
-static tcg_insn_unit *qemu_st_trampoline[16];
57
+static const tcg_insn_unit *qemu_ld_trampoline[16];
58
+static const tcg_insn_unit *qemu_st_trampoline[16];
59
60
static void emit_extend(TCGContext *s, TCGReg r, int op)
61
{
62
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
63
while ((uintptr_t)s->code_ptr & 15) {
64
tcg_out_nop(s);
65
}
66
- qemu_ld_trampoline[i] = s->code_ptr;
67
+ qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
68
69
if (SPARC64 || TARGET_LONG_BITS == 32) {
70
ra = TCG_REG_O3;
71
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
72
while ((uintptr_t)s->code_ptr & 15) {
73
tcg_out_nop(s);
74
}
75
- qemu_st_trampoline[i] = s->code_ptr;
76
+ qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);
77
78
if (SPARC64) {
79
emit_extend(s, TCG_REG_O2, i);
80
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
81
tcg_out_nop(s);
82
83
/* Epilogue for goto_ptr. */
84
- tcg_code_gen_epilogue = s->code_ptr;
85
+ /* TODO: Cast goes away when all hosts converted */
86
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
87
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
88
/* delay slot */
89
tcg_out_movi_imm13(s, TCG_REG_O0, 0);
90
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
91
#ifdef CONFIG_SOFTMMU
92
unsigned memi = get_mmuidx(oi);
93
TCGReg addrz, param;
94
- tcg_insn_unit *func;
95
+ const tcg_insn_unit *func;
96
tcg_insn_unit *label_ptr;
97
98
addrz = tcg_out_tlb_load(s, addr, memi, memop,
99
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
100
#ifdef CONFIG_SOFTMMU
101
unsigned memi = get_mmuidx(oi);
102
TCGReg addrz, param;
103
- tcg_insn_unit *func;
104
+ const tcg_insn_unit *func;
105
tcg_insn_unit *label_ptr;
106
107
addrz = tcg_out_tlb_load(s, addr, memi, memop,
108
--
2.25.1
Deleted patch
1
Use tcg_tbrel_diff when we need a displacement to a label,
2
and with a NULL argument when we need the normalizing addend.
3
1
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/s390/tcg-target.c.inc | 16 ++++++++--------
7
1 file changed, 8 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/s390/tcg-target.c.inc
12
+++ b/tcg/s390/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
14
return;
15
}
16
} else if (USE_REG_TB && !in_prologue) {
17
- ptrdiff_t off = sval - (uintptr_t)s->code_gen_ptr;
18
+ ptrdiff_t off = tcg_tbrel_diff(s, (void *)sval);
19
if (off == sextract64(off, 0, 20)) {
20
/* This is certain to be an address within TB, and therefore
21
OFF will be negative; don't try RX_LA. */
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
23
} else if (USE_REG_TB && !in_prologue) {
24
tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
25
new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
26
- -(intptr_t)s->code_gen_ptr);
27
+ tcg_tbrel_diff(s, NULL));
28
} else {
29
TCGReg base = ret ? ret : TCG_TMP0;
30
tcg_out_insn(s, RIL, LARL, base, 0);
31
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
32
}
33
}
34
if (USE_REG_TB) {
35
- ptrdiff_t disp = abs - (void *)s->code_gen_ptr;
36
+ ptrdiff_t disp = tcg_tbrel_diff(s, abs);
37
if (disp == sextract64(disp, 0, 20)) {
38
tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
39
return;
40
@@ -XXX,XX +XXX,XX @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
41
if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
42
tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
43
new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
44
- -(intptr_t)s->code_gen_ptr);
45
+ tcg_tbrel_diff(s, NULL));
46
return;
47
}
48
} else {
49
@@ -XXX,XX +XXX,XX @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
50
} else if (USE_REG_TB) {
51
tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
52
new_pool_label(s, val, R_390_20, s->code_ptr - 2,
53
- -(intptr_t)s->code_gen_ptr);
54
+ tcg_tbrel_diff(s, NULL));
55
} else {
56
/* Perform the OR via sequential modifications to the high and
57
low parts. Do this via recursion to handle 16-bit vs 32-bit
58
@@ -XXX,XX +XXX,XX @@ static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
59
} else if (USE_REG_TB) {
60
tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
61
new_pool_label(s, val, R_390_20, s->code_ptr - 2,
62
- -(intptr_t)s->code_gen_ptr);
63
+ tcg_tbrel_diff(s, NULL));
64
} else {
65
/* Perform the xor by parts. */
66
tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM);
67
@@ -XXX,XX +XXX,XX @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
68
op = (is_unsigned ? RXY_CLY : RXY_CY);
69
tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
70
new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
71
- 4 - (intptr_t)s->code_gen_ptr);
72
+ 4 - tcg_tbrel_diff(s, NULL));
73
} else {
74
op = (is_unsigned ? RXY_CLG : RXY_CG);
75
tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
76
new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
77
- -(intptr_t)s->code_gen_ptr);
78
+ tcg_tbrel_diff(s, NULL));
79
}
80
goto exit;
81
} else {
82
--
2.25.1
Deleted patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
3
tcg/s390/tcg-target.h | 2 +-
4
tcg/s390/tcg-target.c.inc | 69 +++++++++++++++++----------------------
5
2 files changed, 31 insertions(+), 40 deletions(-)
6
1
7
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/s390/tcg-target.h
10
+++ b/tcg/s390/tcg-target.h
11
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
12
#define TCG_TARGET_NEED_LDST_LABELS
13
#endif
14
#define TCG_TARGET_NEED_POOL_LABELS
15
-#define TCG_TARGET_SUPPORT_MIRROR 0
16
+#define TCG_TARGET_SUPPORT_MIRROR 1
17
18
#endif
19
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/s390/tcg-target.c.inc
22
+++ b/tcg/s390/tcg-target.c.inc
23
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[16] = {
24
};
25
#endif
26
27
-static tcg_insn_unit *tb_ret_addr;
28
+static const tcg_insn_unit *tb_ret_addr;
29
uint64_t s390_facilities;
30
31
-static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
32
+static bool patch_reloc(tcg_insn_unit *src_rw, int type,
33
intptr_t value, intptr_t addend)
34
{
35
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
36
intptr_t pcrel2;
37
uint32_t old;
38
39
value += addend;
40
- pcrel2 = (tcg_insn_unit *)value - code_ptr;
41
+ pcrel2 = (tcg_insn_unit *)value - src_rx;
42
43
switch (type) {
44
case R_390_PC16DBL:
45
if (pcrel2 == (int16_t)pcrel2) {
46
- tcg_patch16(code_ptr, pcrel2);
47
+ tcg_patch16(src_rw, pcrel2);
48
return true;
49
}
50
break;
51
case R_390_PC32DBL:
52
if (pcrel2 == (int32_t)pcrel2) {
53
- tcg_patch32(code_ptr, pcrel2);
54
+ tcg_patch32(src_rw, pcrel2);
55
return true;
56
}
57
break;
58
case R_390_20:
59
if (value == sextract64(value, 0, 20)) {
60
- old = *(uint32_t *)code_ptr & 0xf00000ff;
61
+ old = *(uint32_t *)src_rw & 0xf00000ff;
62
old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
63
- tcg_patch32(code_ptr, old);
64
+ tcg_patch32(src_rw, old);
65
return true;
66
}
67
break;
68
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
69
}
70
71
/* load data from an absolute host address */
72
-static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
73
+static void tcg_out_ld_abs(TCGContext *s, TCGType type,
74
+ TCGReg dest, const void *abs)
75
{
76
intptr_t addr = (intptr_t)abs;
77
78
@@ -XXX,XX +XXX,XX @@ static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
79
80
static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
81
{
82
- ptrdiff_t off = dest - s->code_ptr;
83
+ ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
84
if (off == (int16_t)off) {
85
tcg_out_insn(s, RI, BRC, cc, off);
86
} else if (off == (int32_t)off) {
87
@@ -XXX,XX +XXX,XX @@ static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
88
static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
89
TCGReg r1, TCGReg r2, TCGLabel *l)
90
{
91
- intptr_t off = 0;
92
-
93
- if (l->has_value) {
94
- off = l->u.value_ptr - s->code_ptr;
95
- tcg_debug_assert(off == (int16_t)off);
96
- } else {
97
- tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
98
- }
99
-
100
+ tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
101
tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
102
- tcg_out16(s, off);
103
+ tcg_out16(s, 0);
104
tcg_out16(s, cc << 12 | (opc & 0xff));
105
}
106
107
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
108
TCGReg r1, int i2, TCGLabel *l)
109
{
110
- tcg_target_long off = 0;
111
-
112
- if (l->has_value) {
113
- off = l->u.value_ptr - s->code_ptr;
114
- tcg_debug_assert(off == (int16_t)off);
115
- } else {
116
- tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
117
- }
118
-
119
+ tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
120
tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
121
- tcg_out16(s, off);
122
+ tcg_out16(s, 0);
123
tcg_out16(s, (i2 << 8) | (opc & 0xff));
124
}
125
126
@@ -XXX,XX +XXX,XX @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
127
128
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
129
{
130
- ptrdiff_t off = dest - s->code_ptr;
131
+ ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
132
if (off == (int32_t)off) {
133
tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
134
} else {
135
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
136
label->oi = oi;
137
label->datalo_reg = data;
138
label->addrlo_reg = addr;
139
- label->raddr = raddr;
140
+ /* TODO: Cast goes away when all hosts converted */
141
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
142
label->label_ptr[0] = label_ptr;
143
}
144
145
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
146
MemOp opc = get_memop(oi);
147
148
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
149
- (intptr_t)s->code_ptr, 2)) {
150
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
151
return false;
152
}
153
154
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
155
MemOp opc = get_memop(oi);
156
157
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
158
- (intptr_t)s->code_ptr, 2)) {
159
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
160
return false;
161
}
162
163
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
164
case INDEX_op_goto_tb:
165
a0 = args[0];
166
if (s->tb_jmp_insn_offset) {
167
- /* branch displacement must be aligned for atomic patching;
168
+ /*
169
+ * branch displacement must be aligned for atomic patching;
170
* see if we need to add extra nop before branch
171
*/
172
if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
173
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
174
} else {
175
/* load address stored at s->tb_jmp_target_addr + a0 */
176
tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
177
- s->tb_jmp_target_addr + a0);
178
+ tcg_splitwx_to_rx(s->tb_jmp_target_addr + a0));
179
/* and go there */
180
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
181
}
182
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
183
TCG_REG_TB to the beginning of this TB. */
184
if (USE_REG_TB) {
185
int ofs = -tcg_current_code_size(s);
186
- assert(ofs == (int16_t)ofs);
187
- tcg_out_insn(s, RI, AGHI, TCG_REG_TB, ofs);
188
+ /* All TB are restricted to 64KiB by unwind info. */
189
+ tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
190
+ tcg_out_insn(s, RXY, LAY, TCG_REG_TB,
191
+ TCG_REG_TB, TCG_REG_NONE, ofs);
192
}
193
break;
194
195
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
196
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
197
* and fall through to the rest of the epilogue.
198
*/
199
- tcg_code_gen_epilogue = s->code_ptr;
200
+ /* TODO: Cast goes away when all hosts converted */
201
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
202
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);
203
204
/* TB epilogue */
205
- tb_ret_addr = s->code_ptr;
206
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
207
208
/* lmg %r6,%r15,fs+48(%r15) (restore registers) */
209
tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
210
--
2.25.1
Deleted patch
The offset even checks were folded into the range check incorrectly.
By offsetting by 1, and not decrementing the width, we silently
allowed out of range branches.

Assert that the offset is always even instead. Move tcg_out_goto
down into the CONFIG_SOFTMMU block so that it is not unused.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
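
A standalone sketch of what the two forms accept (not part of the patch;
written against the generic sextract64() from "qemu/bitops.h", which the
backend's sextreg() wraps on 64-bit hosts, if I read it right):

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"

    /* Old form: the even test is folded in by extracting from bit 1, but
       the field width stays 12, so any even offset in [-4096, +4094] is
       accepted, one bit wider than a 12-bit signed range. */
    static bool branch_disp_ok_old(int64_t offset)
    {
        return offset == (sextract64(offset, 1, 12) << 1);
    }

    /* New form: evenness is asserted separately and the range test is a
       plain 12-bit signed check, [-2048, +2047]. */
    static bool branch_disp_ok_new(int64_t offset)
    {
        assert((offset & 1) == 0);
        return offset == sextract64(offset, 0, 12);
    }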
10
---
11
tcg/riscv/tcg-target.c.inc | 28 +++++++++++++++-------------
12
1 file changed, 15 insertions(+), 13 deletions(-)
13
14
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/riscv/tcg-target.c.inc
17
+++ b/tcg/riscv/tcg-target.c.inc
18
@@ -XXX,XX +XXX,XX @@ static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
19
{
20
intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
21
22
- if (offset == sextreg(offset, 1, 12) << 1) {
23
+ tcg_debug_assert((offset & 1) == 0);
24
+ if (offset == sextreg(offset, 0, 12)) {
25
code_ptr[0] |= encode_sbimm12(offset);
26
return true;
27
}
28
@@ -XXX,XX +XXX,XX @@ static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
29
{
30
intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
31
32
- if (offset == sextreg(offset, 1, 20) << 1) {
33
+ tcg_debug_assert((offset & 1) == 0);
34
+ if (offset == sextreg(offset, 0, 20)) {
35
code_ptr[0] |= encode_ujimm20(offset);
36
return true;
37
}
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
39
g_assert_not_reached();
40
}
41
42
-static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
43
-{
44
- ptrdiff_t offset = tcg_pcrel_diff(s, target);
45
- tcg_debug_assert(offset == sextreg(offset, 1, 20) << 1);
46
- tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset);
47
-}
48
-
49
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
50
{
51
TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
52
ptrdiff_t offset = tcg_pcrel_diff(s, arg);
53
int ret;
54
55
- if (offset == sextreg(offset, 1, 20) << 1) {
56
+ tcg_debug_assert((offset & 1) == 0);
57
+ if (offset == sextreg(offset, 0, 20)) {
58
/* short jump: -2097150 to 2097152 */
59
tcg_out_opc_jump(s, OPC_JAL, link, offset);
60
- } else if (TCG_TARGET_REG_BITS == 32 ||
61
- offset == sextreg(offset, 1, 31) << 1) {
62
+ } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
63
/* long jump: -2147483646 to 2147483648 */
64
tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
65
tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
66
- ret = reloc_call(s->code_ptr - 2, arg);\
67
+ ret = reloc_call(s->code_ptr - 2, arg);
68
tcg_debug_assert(ret == true);
69
} else if (TCG_TARGET_REG_BITS == 64) {
70
/* far jump: 64-bit */
71
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
72
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
73
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
74
75
+static void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
76
+{
77
+ tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
78
+ bool ok = reloc_jimm20(s->code_ptr - 1, target);
79
+ tcg_debug_assert(ok);
80
+}
81
+
82
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
83
TCGReg addrh, TCGMemOpIdx oi,
84
tcg_insn_unit **label_ptr, bool is_load)
85
--
2.25.1
Deleted patch
1
Since 7ecd02a06f8, we are prepared to re-start code generation
2
with a smaller TB if a relocation is out of range. We no longer
3
need to leave a nop in the stream Just In Case.
4
1
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/riscv/tcg-target.c.inc | 56 ++++----------------------------------
9
1 file changed, 6 insertions(+), 50 deletions(-)
10
11
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/riscv/tcg-target.c.inc
14
+++ b/tcg/riscv/tcg-target.c.inc
15
@@ -XXX,XX +XXX,XX @@ static bool reloc_call(tcg_insn_unit *code_ptr, const tcg_insn_unit *target)
16
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
17
intptr_t value, intptr_t addend)
18
{
19
- uint32_t insn = *code_ptr;
20
- intptr_t diff;
21
- bool short_jmp;
22
-
23
tcg_debug_assert(addend == 0);
24
-
25
switch (type) {
26
case R_RISCV_BRANCH:
27
- diff = value - (uintptr_t)code_ptr;
28
- short_jmp = diff == sextreg(diff, 0, 12);
29
- if (short_jmp) {
30
- return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
31
- } else {
32
- /* Invert the condition */
33
- insn = insn ^ (1 << 12);
34
- /* Clear the offset */
35
- insn &= 0x01fff07f;
36
- /* Set the offset to the PC + 8 */
37
- insn |= encode_sbimm12(8);
38
-
39
- /* Move forward */
40
- code_ptr[0] = insn;
41
-
42
- /* Overwrite the NOP with jal x0,value */
43
- diff = value - (uintptr_t)(code_ptr + 1);
44
- insn = encode_uj(OPC_JAL, TCG_REG_ZERO, diff);
45
- code_ptr[1] = insn;
46
-
47
- return true;
48
- }
49
- break;
50
+ return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
51
case R_RISCV_JAL:
52
return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
53
case R_RISCV_CALL:
54
return reloc_call(code_ptr, (tcg_insn_unit *)value);
55
default:
56
- tcg_abort();
57
+ g_assert_not_reached();
58
}
59
}
60
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
62
arg2 = t;
63
}
64
65
- if (l->has_value) {
66
- intptr_t diff = tcg_pcrel_diff(s, l->u.value_ptr);
67
- if (diff == sextreg(diff, 0, 12)) {
68
- tcg_out_opc_branch(s, op, arg1, arg2, diff);
69
- } else {
70
- /* Invert the conditional branch. */
71
- tcg_out_opc_branch(s, op ^ (1 << 12), arg1, arg2, 8);
72
- tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, diff - 4);
73
- }
74
- } else {
75
- tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
76
- tcg_out_opc_branch(s, op, arg1, arg2, 0);
77
- /* NOP to allow patching later */
78
- tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
79
- }
80
+ tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
81
+ tcg_out_opc_branch(s, op, arg1, arg2, 0);
82
}
83
84
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
85
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
86
/* Compare masked address with the TLB entry. */
87
label_ptr[0] = s->code_ptr;
88
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
89
- /* NOP to allow patching later */
90
- tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
91
92
/* TLB Hit - translate address using addend. */
93
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
94
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
95
}
96
97
/* resolve label address */
98
- if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
99
- (intptr_t) s->code_ptr, 0)) {
100
+ if (!reloc_sbimm12(l->label_ptr[0], s->code_ptr)) {
101
return false;
102
}
103
104
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
105
}
106
107
/* resolve label address */
108
- if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
109
- (intptr_t) s->code_ptr, 0)) {
110
+ if (!reloc_sbimm12(l->label_ptr[0], s->code_ptr)) {
111
return false;
112
}
113
114
--
2.25.1
Deleted patch
1
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/riscv/tcg-target.h | 2 +-
5
tcg/riscv/tcg-target.c.inc | 41 +++++++++++++++++++++-----------------
6
2 files changed, 24 insertions(+), 19 deletions(-)
7
1
8
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/riscv/tcg-target.h
11
+++ b/tcg/riscv/tcg-target.h
12
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
13
#define TCG_TARGET_NEED_POOL_LABELS
14
15
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
16
-#define TCG_TARGET_SUPPORT_MIRROR 0
17
+#define TCG_TARGET_SUPPORT_MIRROR 1
18
19
#endif
20
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/riscv/tcg-target.c.inc
23
+++ b/tcg/riscv/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
25
* Relocations
26
*/
27
28
-static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
29
+static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
30
{
31
- intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
32
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
33
+ intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
34
35
tcg_debug_assert((offset & 1) == 0);
36
if (offset == sextreg(offset, 0, 12)) {
37
- code_ptr[0] |= encode_sbimm12(offset);
38
+ *src_rw |= encode_sbimm12(offset);
39
return true;
40
}
41
42
return false;
43
}
44
45
-static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
46
+static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
47
{
48
- intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
49
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
50
+ intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
51
52
tcg_debug_assert((offset & 1) == 0);
53
if (offset == sextreg(offset, 0, 20)) {
54
- code_ptr[0] |= encode_ujimm20(offset);
55
+ *src_rw |= encode_ujimm20(offset);
56
return true;
57
}
58
59
return false;
60
}
61
62
-static bool reloc_call(tcg_insn_unit *code_ptr, const tcg_insn_unit *target)
63
+static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
64
{
65
- intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
66
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
67
+ intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
68
int32_t lo = sextreg(offset, 0, 12);
69
int32_t hi = offset - lo;
70
71
if (offset == hi + lo) {
72
- code_ptr[0] |= encode_uimm20(hi);
73
- code_ptr[1] |= encode_imm12(lo);
74
+ src_rw[0] |= encode_uimm20(hi);
75
+ src_rw[1] |= encode_imm12(lo);
76
return true;
77
}
78
79
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
80
if (tmp == (int32_t)tmp) {
81
tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
82
tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
83
- ret = reloc_call(s->code_ptr - 2, (tcg_insn_unit *)val);
84
+ ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
85
tcg_debug_assert(ret == true);
86
return;
87
}
88
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);
89
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
90
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
91
92
-static void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
93
+static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
94
{
95
tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
96
bool ok = reloc_jimm20(s->code_ptr - 1, target);
97
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
98
label->datahi_reg = datahi;
99
label->addrlo_reg = addrlo;
100
label->addrhi_reg = addrhi;
101
- label->raddr = raddr;
102
+ /* TODO: Cast goes away when all hosts converted */
103
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
104
label->label_ptr[0] = label_ptr[0];
105
}
106
107
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
108
}
109
110
/* resolve label address */
111
- if (!reloc_sbimm12(l->label_ptr[0], s->code_ptr)) {
112
+ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
113
return false;
114
}
115
116
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
117
}
118
119
/* resolve label address */
120
- if (!reloc_sbimm12(l->label_ptr[0], s->code_ptr)) {
121
+ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
122
return false;
123
}
124
125
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
126
#endif
127
}
128
129
-static tcg_insn_unit *tb_ret_addr;
130
+static const tcg_insn_unit *tb_ret_addr;
131
132
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
133
const TCGArg *args, const int *const_args)
134
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
135
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
136
137
/* Return path for goto_ptr. Set return value to 0 */
138
- tcg_code_gen_epilogue = s->code_ptr;
139
+ /* TODO: Cast goes away when all hosts converted */
140
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
141
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
142
143
/* TB epilogue */
144
- tb_ret_addr = s->code_ptr;
145
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
146
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
147
tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
148
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
149
--
2.25.1
Deleted patch
Re-use the 256MiB region handling from alloc_code_gen_buffer_anon,
and replace that with the shared file mapping.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
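
For reference, the mips-specific path is the usual reserve-then-replace
mmap idiom; a minimal sketch of it follows (names invented, the
qemu_memfd_alloc() and 256MiB-region details omitted):

    #include <stddef.h>
    #include <sys/mman.h>

    static void *map_rx_alias(size_t size, int fd)
    {
        /* Reserve address space for the executable alias without
           committing memory; the patch does this via
           alloc_code_gen_buffer_anon() with PROT_NONE so the region
           lands inside the right 256MiB window. */
        void *rx = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (rx == MAP_FAILED) {
            return NULL;
        }
        /* Replace the reservation in place with the shared memfd
           mapping, keeping the address fixed. */
        if (mmap(rx, size, PROT_READ | PROT_EXEC,
                 MAP_SHARED | MAP_FIXED, fd, 0) != rx) {
            munmap(rx, size);
            return NULL;
        }
        return rx;
    }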
5
---
6
accel/tcg/translate-all.c | 46 ++++++++++++++++++++++++++++++++-------
7
1 file changed, 38 insertions(+), 8 deletions(-)
8
9
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/translate-all.c
12
+++ b/accel/tcg/translate-all.c
13
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
14
15
static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
16
{
17
- void *buf_rw, *buf_rx;
18
+ void *buf_rw = NULL, *buf_rx = MAP_FAILED;
19
int fd = -1;
20
21
+#ifdef __mips__
22
+ /* Find space for the RX mapping, vs the 256MiB regions. */
23
+ if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
24
+ MAP_PRIVATE | MAP_ANONYMOUS |
25
+ MAP_NORESERVE, errp)) {
26
+ return false;
27
+ }
28
+ /* The size of the mapping may have been adjusted. */
29
+ size = tcg_ctx->code_gen_buffer_size;
30
+ buf_rx = tcg_ctx->code_gen_buffer;
31
+#endif
32
+
33
buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
34
if (buf_rw == NULL) {
35
- return false;
36
+ goto fail;
37
}
38
39
+#ifdef __mips__
40
+ void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
41
+ MAP_SHARED | MAP_FIXED, fd, 0);
42
+ if (tmp != buf_rx) {
43
+ goto fail_rx;
44
+ }
45
+#else
46
buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
47
if (buf_rx == MAP_FAILED) {
48
- error_setg_errno(errp, errno,
49
- "failed to map shared memory for execute");
50
- munmap(buf_rw, size);
51
- close(fd);
52
- return false;
53
+ goto fail_rx;
54
}
55
- close(fd);
56
+#endif
57
58
+ close(fd);
59
tcg_ctx->code_gen_buffer = buf_rw;
60
tcg_ctx->code_gen_buffer_size = size;
61
tcg_splitwx_diff = buf_rx - buf_rw;
62
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
63
qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
64
qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
65
return true;
66
+
67
+ fail_rx:
68
+ error_setg_errno(errp, errno, "failed to map shared memory for execute");
69
+ fail:
70
+ if (buf_rx != MAP_FAILED) {
71
+ munmap(buf_rx, size);
72
+ }
73
+ if (buf_rw) {
74
+ munmap(buf_rw, size);
75
+ }
76
+ if (fd >= 0) {
77
+ close(fd);
78
+ }
79
+ return false;
80
}
81
#endif /* CONFIG_POSIX */
82
83
--
2.25.1
Deleted patch
1
This target was not updated with 7ecd02a06f8, and so did
2
not allow re-compilation with relocation overflow.
3
1
4
Remove reloc_26 and reloc_26_val as unused.
5
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
 tcg/mips/tcg-target.c.inc | 53 ++++++++++++++-------------------------
 1 file changed, 19 insertions(+), 34 deletions(-)

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static tcg_insn_unit *bswap32_addr;
static tcg_insn_unit *bswap32u_addr;
static tcg_insn_unit *bswap64_addr;

-static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc,
- const tcg_insn_unit *target)
+static bool reloc_pc16(tcg_insn_unit *pc, const tcg_insn_unit *target)
{
/* Let the compiler perform the right-shift as part of the arithmetic. */
ptrdiff_t disp = target - (pc + 1);
- tcg_debug_assert(disp == (int16_t)disp);
- return disp & 0xffff;
-}
-
-static inline void reloc_pc16(tcg_insn_unit *pc, const tcg_insn_unit *target)
-{
- *pc = deposit32(*pc, 0, 16, reloc_pc16_val(pc, target));
-}
-
-static inline uint32_t reloc_26_val(tcg_insn_unit *pc, tcg_insn_unit *target)
-{
- tcg_debug_assert((((uintptr_t)pc ^ (uintptr_t)target) & 0xf0000000) == 0);
- return ((uintptr_t)target >> 2) & 0x3ffffff;
-}
-
-static inline void reloc_26(tcg_insn_unit *pc, tcg_insn_unit *target)
-{
- *pc = deposit32(*pc, 0, 26, reloc_26_val(pc, target));
+ if (disp == (int16_t)disp) {
+ *pc = deposit32(*pc, 0, 16, disp);
+ return true;
+ }
+ return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
{
tcg_debug_assert(type == R_MIPS_PC16);
tcg_debug_assert(addend == 0);
- reloc_pc16(code_ptr, (tcg_insn_unit *)value);
- return true;
+ return reloc_pc16(code_ptr, (const tcg_insn_unit *)value);
}

#define TCG_CT_CONST_ZERO 0x100
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
}

tcg_out_opc_br(s, b_opc, arg1, arg2);
- if (l->has_value) {
- reloc_pc16(s->code_ptr - 1, l->u.value_ptr);
- } else {
- tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
- }
+ tcg_out_reloc(s, s->code_ptr - 1, R_MIPS_PC16, l, 0);
tcg_out_nop(s);
}

@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
int i;

/* resolve label address */
- reloc_pc16(l->label_ptr[0], s->code_ptr);
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- reloc_pc16(l->label_ptr[1], s->code_ptr);
+ if (!reloc_pc16(l->label_ptr[0], s->code_ptr)
+ || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
+ && !reloc_pc16(l->label_ptr[1], s->code_ptr))) {
+ return false;
}

i = 1;
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
}

tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
- reloc_pc16(s->code_ptr - 1, l->raddr);
+ if (!reloc_pc16(s->code_ptr - 1, l->raddr)) {
+ return false;
+ }

/* delay slot */
if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
int i;

/* resolve label address */
- reloc_pc16(l->label_ptr[0], s->code_ptr);
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
- reloc_pc16(l->label_ptr[1], s->code_ptr);
+ if (!reloc_pc16(l->label_ptr[0], s->code_ptr)
+ || (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
+ && !reloc_pc16(l->label_ptr[1], s->code_ptr))) {
+ return false;
}

i = 1;
--
2.25.1

Deleted patch

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/mips/tcg-target.h | 2 +-
 tcg/mips/tcg-target.c.inc | 43 ++++++++++++++++++++++-----------------
 2 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;

#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-#define TCG_TARGET_SUPPORT_MIRROR 0
+#define TCG_TARGET_SUPPORT_MIRROR 1

void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);

diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const TCGReg tcg_target_call_oarg_regs[2] = {
TCG_REG_V1
};

-static tcg_insn_unit *tb_ret_addr;
-static tcg_insn_unit *bswap32_addr;
-static tcg_insn_unit *bswap32u_addr;
-static tcg_insn_unit *bswap64_addr;
+static const tcg_insn_unit *tb_ret_addr;
+static const tcg_insn_unit *bswap32_addr;
+static const tcg_insn_unit *bswap32u_addr;
+static const tcg_insn_unit *bswap64_addr;

-static bool reloc_pc16(tcg_insn_unit *pc, const tcg_insn_unit *target)
+static bool reloc_pc16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
/* Let the compiler perform the right-shift as part of the arithmetic. */
- ptrdiff_t disp = target - (pc + 1);
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ ptrdiff_t disp = target - (src_rx + 1);
if (disp == (int16_t)disp) {
- *pc = deposit32(*pc, 0, 16, disp);
+ *src_rw = deposit32(*src_rw, 0, 16, disp);
return true;
}
return false;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2,
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, const void *target)
{
uintptr_t dest = (uintptr_t)target;
- uintptr_t from = (uintptr_t)s->code_ptr + 4;
+ uintptr_t from = (uintptr_t)tcg_splitwx_to_rx(s->code_ptr) + 4;
int32_t inst;

/* The pc-region branch happens within the 256MB region of
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
}
}

-static void tcg_out_bswap_subr(TCGContext *s, tcg_insn_unit *sub)
+static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub)
{
bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub);
tcg_debug_assert(ok);
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- label->raddr = raddr;
+ /* TODO: Cast goes away when all hosts converted */
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr[0];
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
label->label_ptr[1] = label_ptr[1];
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
+ const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
TCGMemOpIdx oi = l->oi;
MemOp opc = get_memop(oi);
TCGReg v0;
int i;

/* resolve label address */
- if (!reloc_pc16(l->label_ptr[0], s->code_ptr)
+ if (!reloc_pc16(l->label_ptr[0], tgt_rx)
|| (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
- && !reloc_pc16(l->label_ptr[1], s->code_ptr))) {
+ && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
+ const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
TCGMemOpIdx oi = l->oi;
MemOp opc = get_memop(oi);
MemOp s_bits = opc & MO_SIZE;
int i;

/* resolve label address */
- if (!reloc_pc16(l->label_ptr[0], s->code_ptr)
+ if (!reloc_pc16(l->label_ptr[0], tgt_rx)
|| (TCG_TARGET_REG_BITS < TARGET_LONG_BITS
- && !reloc_pc16(l->label_ptr[1], s->code_ptr))) {
+ && !reloc_pc16(l->label_ptr[1], tgt_rx))) {
return false;
}

@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- tcg_code_gen_epilogue = s->code_ptr;
+ /* TODO: Cast goes away when all hosts converted */
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);

/* TB epilogue */
- tb_ret_addr = s->code_ptr;
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
/*
* bswap32 -- 32-bit swap (signed result for mips64). a0 = abcd.
*/
- bswap32_addr = align_code_ptr(s);
+ bswap32_addr = tcg_splitwx_to_rx(align_code_ptr(s));
/* t3 = (ssss)d000 */
tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24);
/* t1 = 000a */
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
/*
* bswap32u -- unsigned 32-bit swap. a0 = ....abcd.
*/
- bswap32u_addr = align_code_ptr(s);
+ bswap32u_addr = tcg_splitwx_to_rx(align_code_ptr(s));
/* t1 = (0000)000d */
tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff);
/* t3 = 000a */
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
/*
* bswap64 -- 64-bit swap. a0 = abcdefgh
*/
- bswap64_addr = align_code_ptr(s);
+ bswap64_addr = tcg_splitwx_to_rx(align_code_ptr(s));
/* t3 = h0000000 */
tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56);
/* t1 = 0000000a */
--
2.25.1

Deleted patch

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.h | 2 +-
 tcg/arm/tcg-target.c.inc | 37 +++++++++++++++++++++----------------
 2 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 0
+#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static const uint8_t tcg_cond_to_arm_cond[] = {
[TCG_COND_GTU] = COND_HI,
};

-static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
+static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
- ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;
+
if (offset == sextract32(offset, 0, 24)) {
- *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
+ *src_rw = deposit32(*src_rw, 0, 24, offset);
return true;
}
return false;
}

-static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
+static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
- ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

if (offset >= -0xfff && offset <= 0xfff) {
- tcg_insn_unit insn = *code_ptr;
+ tcg_insn_unit insn = *src_rw;
bool u = (offset >= 0);
if (!u) {
offset = -offset;
}
insn = deposit32(insn, 23, 1, u);
insn = deposit32(insn, 0, 12, offset);
- *code_ptr = insn;
+ *src_rw = insn;
return true;
}
return false;
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
tcg_debug_assert(addend == 0);

if (type == R_ARM_PC24) {
- return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
+ return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
} else if (type == R_ARM_PC13) {
- return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
+ return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
} else {
g_assert_not_reached();
}
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)

/* Check for a pc-relative address. This will usually be the TB,
or within the TB, which is immediately before the code block. */
- diff = arg - ((intptr_t)s->code_ptr + 8);
+ diff = tcg_pcrel_diff(s, (void *)arg) - 8;
if (diff >= 0) {
rot = encode_imm(diff);
if (rot >= 0) {
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- label->raddr = raddr;
+ /* TODO: Cast goes away when all hosts converted */
+ label->raddr = (void *)tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr;
}

@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
MemOp opc = get_memop(oi);
void *func;

- if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+ if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
}

@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
TCGMemOpIdx oi = lb->oi;
MemOp opc = get_memop(oi);

- if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+ if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
}

@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGReg base = TCG_REG_PC;

tcg_debug_assert(s->tb_jmp_insn_offset == 0);
- ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
- dif = ptr - ((intptr_t)s->code_ptr + 8);
+ ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
+ dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
dil = sextract32(dif, 0, 12);
if (dif != dil) {
/* The TB is close, but outside the 12 bits addressable by
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- tcg_code_gen_epilogue = s->code_ptr;
+ /* TODO: Cast goes away when all hosts converted */
+ tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
tcg_out_epilogue(s);
}
--
2.25.1

Deleted patch

Now that all native tcg hosts support splitwx, remove the
TCG_TARGET_SUPPORT_MIRROR define. Replace the one use with a
test for CONFIG_TCG_INTERPRETER.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
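As background, the split-wx scheme that every native backend can now use
boils down to mapping the same anonymous file twice: a read/write view for
the translator and a read/execute view from which the generated code runs.
A minimal stand-alone Linux illustration follows (simplified, error handling
trimmed; it is not QEMU's implementation):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t size = 4096;
        int fd = memfd_create("splitwx-demo", 0);   /* anonymous backing file */

        if (fd < 0 || ftruncate(fd, size) < 0) {
            return 1;
        }

        /* Two views of the same pages: one to write into, one to execute from. */
        void *buf_rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        void *buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
        close(fd);
        if (buf_rw == MAP_FAILED || buf_rx == MAP_FAILED) {
            return 1;
        }

        memcpy(buf_rw, "split-wx", 9);               /* store via the RW view */
        printf("%s\n", (const char *)buf_rx);        /* read back via the RX view */

        munmap(buf_rw, size);
        munmap(buf_rx, size);
        return 0;
    }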
 tcg/aarch64/tcg-target.h | 1 -
 tcg/arm/tcg-target.h | 1 -
 tcg/i386/tcg-target.h | 1 -
 tcg/mips/tcg-target.h | 1 -
 tcg/ppc/tcg-target.h | 1 -
 tcg/riscv/tcg-target.h | 1 -
 tcg/s390/tcg-target.h | 1 -
 tcg/sparc/tcg-target.h | 1 -
 tcg/tci/tcg-target.h | 1 -
 accel/tcg/translate-all.c | 16 +++++++++-------
 10 files changed, 9 insertions(+), 16 deletions(-)

diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif /* AARCH64_TCG_TARGET_H */
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;

#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-#define TCG_TARGET_SUPPORT_MIRROR 1

void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);

diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_NEED_POOL_LABELS

#define TCG_TARGET_HAS_MEMORY_BSWAP 0
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
#define TCG_TARGET_NEED_LDST_LABELS
#endif
#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);

#define TCG_TARGET_NEED_POOL_LABELS
-#define TCG_TARGET_SUPPORT_MIRROR 1

#endif
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@ void tci_disas(uint8_t opc);
#define TCG_TARGET_DEFAULT_MO (0)

#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-#define TCG_TARGET_SUPPORT_MIRROR 0

static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
uintptr_t jmp_rw, uintptr_t addr)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
return true;
}

+#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
return true;
}
#endif /* CONFIG_DARWIN */
+#endif /* CONFIG_TCG_INTERPRETER */

static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
- if (TCG_TARGET_SUPPORT_MIRROR) {
-#ifdef CONFIG_DARWIN
- return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
+#ifndef CONFIG_TCG_INTERPRETER
+# ifdef CONFIG_DARWIN
+ return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
+# endif
+# ifdef CONFIG_POSIX
+ return alloc_code_gen_buffer_splitwx_memfd(size, errp);
+# endif
#endif
-#ifdef CONFIG_POSIX
- return alloc_code_gen_buffer_splitwx_memfd(size, errp);
-#endif
- }
error_setg(errp, "jit split-wx not supported");
return false;
}
--
2.25.1

Deleted patch

Now that all native tcg hosts support splitwx,
make the tcg_code_gen_epilogue pointer const.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
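The reason a const pointer is sufficient is that the writable and executable
views differ only by a fixed displacement, so converting between them is plain
pointer arithmetic. A simplified stand-alone model of the idea behind
tcg_splitwx_to_rx()/tcg_splitwx_to_rw() and tcg_splitwx_diff (illustrative
only, not the QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Difference between the executable and writable views (0 when there is
     * no split); the real value would be set when the buffers are mapped. */
    static uintptr_t splitwx_diff;

    /* Writable address -> executable address (the one stored in const fields). */
    static const void *to_rx(void *rw)
    {
        return (const void *)((uintptr_t)rw + splitwx_diff);
    }

    /* Executable address -> writable address, for patching. */
    static void *to_rw(const void *rx)
    {
        return (void *)((uintptr_t)rx - splitwx_diff);
    }

    int main(void)
    {
        char rw_view[64], rx_view[64];   /* stand-ins for the two mappings */

        splitwx_diff = (uintptr_t)rx_view - (uintptr_t)rw_view;

        const void *epilogue = to_rx(&rw_view[16]);
        printf("round trip ok: %d\n", to_rw(epilogue) == (void *)&rw_view[16]);
        return 0;
    }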
 include/tcg/tcg.h | 2 +-
 tcg/tcg.c | 2 +-
 tcg/aarch64/tcg-target.c.inc | 3 +--
 tcg/arm/tcg-target.c.inc | 3 +--
 tcg/i386/tcg-target.c.inc | 3 +--
 tcg/mips/tcg-target.c.inc | 3 +--
 tcg/ppc/tcg-target.c.inc | 3 +--
 tcg/riscv/tcg-target.c.inc | 3 +--
 tcg/s390/tcg-target.c.inc | 3 +--
 tcg/sparc/tcg-target.c.inc | 3 +--
 10 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ struct TCGContext {

extern TCGContext tcg_init_ctx;
extern __thread TCGContext *tcg_ctx;
-extern void *tcg_code_gen_epilogue;
+extern const void *tcg_code_gen_epilogue;
extern uintptr_t tcg_splitwx_diff;
extern TCGv_env cpu_env;

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static int tcg_out_ldst_finalize(TCGContext *s);
static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;
-void *tcg_code_gen_epilogue;
+const void *tcg_code_gen_epilogue;
uintptr_t tcg_splitwx_diff;

#ifndef CONFIG_TCG_INTERPRETER
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);

/* TB epilogue */
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
tcg_out_epilogue(s);
}
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_EAX, 0);

/* TB epilogue */
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_V0, TCG_REG_ZERO);

/* TB epilogue */
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out32(s, BCCTR | BO_ALWAYS);

/* Epilogue */
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);

tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

/* Return path for goto_ptr. Set return value to 0 */
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

/* TB epilogue */
diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* Return path for goto_ptr. Set return value to 0, a-la exit_tb,
* and fall through to the rest of the epilogue.
*/
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

/* TB epilogue */
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc/tcg-target.c.inc
+++ b/tcg/sparc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_nop(s);

/* Epilogue for goto_ptr. */
- /* TODO: Cast goes away when all hosts converted */
- tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
/* delay slot */
tcg_out_movi_imm13(s, TCG_REG_O0, 0);
--
2.25.1

Deleted patch

Now that all native tcg hosts support splitwx,
make the TCGLabelQemuLdst.raddr pointer const.

Reviewed-by: Joelle van Dyne <j@getutm.app>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 3 +--
 tcg/arm/tcg-target.c.inc | 3 +--
 tcg/i386/tcg-target.c.inc | 3 +--
 tcg/mips/tcg-target.c.inc | 3 +--
 tcg/ppc/tcg-target.c.inc | 3 +--
 tcg/riscv/tcg-target.c.inc | 3 +--
 tcg/s390/tcg-target.c.inc | 3 +--
 tcg/tcg-ldst.c.inc | 2 +-
 8 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->type = ext;
label->datalo_reg = data_reg;
label->addrlo_reg = addr_reg;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr;
}

diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr;
}

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr[0];
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
label->label_ptr[1] = label_ptr[1];
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr[0];
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
label->label_ptr[1] = label_ptr[1];
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->datahi_reg = datahi_reg;
label->addrlo_reg = addrlo_reg;
label->addrhi_reg = addrhi_reg;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = lptr;
}

diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr[0];
}

diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
label->oi = oi;
label->datalo_reg = data;
label->addrlo_reg = addr;
- /* TODO: Cast goes away when all hosts converted */
- label->raddr = (void *)tcg_splitwx_to_rx(raddr);
+ label->raddr = tcg_splitwx_to_rx(raddr);
label->label_ptr[0] = label_ptr;
}

diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-ldst.c.inc
+++ b/tcg/tcg-ldst.c.inc
@@ -XXX,XX +XXX,XX @@ typedef struct TCGLabelQemuLdst {
TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
- tcg_insn_unit *raddr; /* gen code addr of the next IR of qemu_ld/st IR */
+ const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
} TCGLabelQemuLdst;
--
2.25.1
