The following changes since commit e750a7ace492f0b450653d4ad368a77d6f660fb8:

  Merge tag 'pull-9p-20221024' of https://github.com/cschoenebeck/qemu into staging (2022-10-24 14:27:12 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20221026

for you to fetch changes up to 04f105758b0089f73ee47260671580cde35f96cc:

  accel/tcg: Remove restore_state_to_opc function (2022-10-26 11:11:28 +1000)

----------------------------------------------------------------
Revert incorrect cflags initialization.
Add direct jumps for tcg/loongarch64.
Speed up breakpoint check.
Improve assertions for atomic.h.
Move restore_state_to_opc to TCGCPUOps.
Cleanups to TranslationBlock maintenance.

----------------------------------------------------------------
Leandro Lupori (1):
      accel/tcg: Add a quicker check for breakpoints

Peter Maydell (1):
      Revert "accel/tcg: Init TCG cflags in vCPU thread handler"

Qi Hu (2):
      tcg/loongarch64: Add direct jump support
      tcg/aarch64: Remove unused code in tcg_out_op

Richard Henderson (43):
      include/qemu/osdep: Add qemu_build_assert
      include/qemu/atomic: Use qemu_build_assert
      include/qemu/thread: Use qatomic_* functions
      accel/tcg: Make page_alloc_target_data allocation constant
      accel/tcg: Remove disabled debug in translate-all.c
      accel/tcg: Split out PageDesc to internal.h
      accel/tcg: Split out tb-maint.c
      accel/tcg: Move assert_no_pages_locked to internal.h
      accel/tcg: Drop cpu_get_tb_cpu_state from TARGET_HAS_PRECISE_SMC
      accel/tcg: Remove duplicate store to tb->page_addr[]
      accel/tcg: Introduce tb_{set_}page_addr{0,1}
      accel/tcg: Rename tb_invalidate_phys_page
      accel/tcg: Rename tb_invalidate_phys_page_range and drop end parameter
      accel/tcg: Unify declarations of tb_invalidate_phys_range
      accel/tcg: Use tb_invalidate_phys_page in page_set_flags
      accel/tcg: Call tb_invalidate_phys_page for PAGE_RESET
      accel/tcg: Use page_reset_target_data in page_set_flags
      accel/tcg: Use tb_invalidate_phys_range in page_set_flags
      accel/tcg: Move TARGET_PAGE_DATA_SIZE impl to user-exec.c
      accel/tcg: Simplify page_get/alloc_target_data
      accel/tcg: Add restore_state_to_opc to TCGCPUOps
      target/alpha: Convert to tcg_ops restore_state_to_opc
      target/arm: Convert to tcg_ops restore_state_to_opc
      target/avr: Convert to tcg_ops restore_state_to_opc
      target/cris: Convert to tcg_ops restore_state_to_opc
      target/hexagon: Convert to tcg_ops restore_state_to_opc
      target/hppa: Convert to tcg_ops restore_state_to_opc
      target/i386: Convert to tcg_ops restore_state_to_opc
      target/loongarch: Convert to tcg_ops restore_state_to_opc
      target/m68k: Convert to tcg_ops restore_state_to_opc
      target/microblaze: Convert to tcg_ops restore_state_to_opc
      target/mips: Convert to tcg_ops restore_state_to_opc
      target/nios2: Convert to tcg_ops restore_state_to_opc
      target/openrisc: Convert to tcg_ops restore_state_to_opc
      target/ppc: Convert to tcg_ops restore_state_to_opc
      target/riscv: Convert to tcg_ops restore_state_to_opc
      target/rx: Convert to tcg_ops restore_state_to_opc
      target/s390x: Convert to tcg_ops restore_state_to_opc
      target/sh4: Convert to tcg_ops restore_state_to_opc
      target/sparc: Convert to tcg_ops restore_state_to_opc
      target/tricore: Convert to tcg_ops restore_state_to_opc
      target/xtensa: Convert to tcg_ops restore_state_to_opc
      accel/tcg: Remove restore_state_to_opc function

 accel/tcg/internal.h             |   91 ++++
 include/exec/cpu-all.h           |   22 +-
 include/exec/exec-all.h          |   35 +-
 include/exec/ram_addr.h          |    2 -
 include/exec/translate-all.h     |    2 +-
 include/hw/core/tcg-cpu-ops.h    |   11 +
 include/qemu/atomic.h            |   16 +-
 include/qemu/osdep.h             |    8 +
 include/qemu/thread.h            |    8 +-
 target/arm/cpu.h                 |    8 +
 target/arm/internals.h           |    4 -
 target/mips/tcg/tcg-internal.h   |    3 +
 target/s390x/s390x-internal.h    |    4 +-
 target/sparc/cpu.h               |    3 +
 tcg/loongarch64/tcg-target.h     |    9 +-
 accel/tcg/cpu-exec.c             |   24 +-
 accel/tcg/tb-maint.c             |  704 ++++++++++++++++++++++++++
 accel/tcg/tcg-accel-ops-mttcg.c  |    5 +-
 accel/tcg/tcg-accel-ops-rr.c     |    7 +-
 accel/tcg/translate-all.c        | 1017 ++------------------------------------
 accel/tcg/translator.c           |    9 +-
 accel/tcg/user-exec.c            |   42 ++
 bsd-user/mmap.c                  |    2 -
 cpu.c                            |    4 +-
 linux-user/mmap.c                |    4 -
 target/alpha/cpu.c               |    9 +
 target/alpha/translate.c         |    6 -
 target/arm/cpu.c                 |   26 +
 target/arm/mte_helper.c          |    5 -
 target/arm/translate.c           |   22 -
 target/avr/cpu.c                 |   11 +
 target/avr/translate.c           |    6 -
 target/cris/cpu.c                |   11 +
 target/cris/translate.c          |    6 -
 target/hexagon/cpu.c             |    9 +-
 target/hppa/cpu.c                |   19 +
 target/hppa/translate.c          |   13 -
 target/i386/tcg/tcg-cpu.c        |   19 +
 target/i386/tcg/translate.c      |   15 -
 target/loongarch/cpu.c           |   11 +
 target/loongarch/translate.c     |    6 -
 target/m68k/cpu.c                |   14 +
 target/m68k/translate.c          |   10 -
 target/microblaze/cpu.c          |   11 +
 target/microblaze/translate.c    |    7 -
 target/mips/cpu.c                |    1 +
 target/mips/tcg/translate.c      |    8 +-
 target/nios2/cpu.c               |   11 +
 target/nios2/translate.c         |    6 -
 target/openrisc/cpu.c            |   13 +
 target/openrisc/translate.c      |   10 -
 target/ppc/cpu_init.c            |   10 +
 target/ppc/translate.c           |    6 -
 target/riscv/cpu.c               |    9 +-
 target/rx/cpu.c                  |   10 +
 target/rx/translate.c            |    6 -
 target/s390x/cpu.c               |    1 +
 target/s390x/tcg/translate.c     |    7 +-
 target/sh4/cpu.c                 |   16 +
 target/sh4/translate.c           |   10 -
 target/sparc/cpu.c               |    1 +
 target/sparc/translate.c         |    7 +-
 target/tricore/cpu.c             |   11 +
 target/tricore/translate.c       |    6 -
 target/xtensa/cpu.c              |   10 +
 target/xtensa/translate.c        |    6 -
 tcg/aarch64/tcg-target.c.inc     |   31 +-
 tcg/loongarch64/tcg-target.c.inc |   48 +-
 accel/tcg/meson.build            |    1 +
 69 files changed, 1304 insertions(+), 1221 deletions(-)
 create mode 100644 accel/tcg/tb-maint.c

From: Peter Maydell <peter.maydell@linaro.org>

Commit a82fd5a4ec24d was intended to be a code cleanup, but
unfortunately it has a bug.  It moves the initialization of the
TCG cflags from the "start a new vcpu" function to the
thread handler; this is fine when each vcpu has its own thread,
but when we are doing round-robin of vcpus on a single thread
we end up only initializing the cflags for CPU 0, not for any
of the others.

The most obvious effect of this bug is that running in icount
mode with more than one CPU is broken; typically the guest
hangs shortly after it brings up the secondary CPUs.

This reverts commit a82fd5a4ec24d923ff1e6da128c0fd4a74079d99.

Cc: qemu-stable@nongnu.org
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20221021163409.3674911-1-peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-accel-ops-mttcg.c | 5 +++--
 accel/tcg/tcg-accel-ops-rr.c    | 7 ++++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
     assert(tcg_enabled());
     g_assert(!icount_enabled());
 
-    tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
-
     rcu_register_thread();
     force_rcu.notifier.notify = mttcg_force_rcu;
     force_rcu.cpu = cpu;
@@ -XXX,XX +XXX,XX @@ void mttcg_start_vcpu_thread(CPUState *cpu)
 {
     char thread_name[VCPU_THREAD_NAME_SIZE];
 
+    g_assert(tcg_enabled());
+    tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
+
     cpu->thread = g_new0(QemuThread, 1);
     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
     qemu_cond_init(cpu->halt_cond);
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -XXX,XX +XXX,XX @@ static void *rr_cpu_thread_fn(void *arg)
     Notifier force_rcu;
     CPUState *cpu = arg;
 
-    g_assert(tcg_enabled());
-    tcg_cpu_init_cflags(cpu, false);
-
+    assert(tcg_enabled());
     rcu_register_thread();
     force_rcu.notify = rr_force_rcu;
     rcu_add_force_rcu_notifier(&force_rcu);
@@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu)
     static QemuCond *single_tcg_halt_cond;
     static QemuThread *single_tcg_cpu_thread;
 
+    g_assert(tcg_enabled());
+    tcg_cpu_init_cflags(cpu, false);
+
     if (!single_tcg_cpu_thread) {
         cpu->thread = g_new0(QemuThread, 1);
         cpu->halt_cond = g_new0(QemuCond, 1);
-- 
2.34.1

From: Qi Hu <huqi@loongson.cn>

Similar to AArch64, LoongArch has PC-relative instructions such as
PCADDU18I.  These instructions can be used to support direct jumps for
LoongArch.  Additionally, if a single "B offset" instruction can cover
the target address (the target is within the ±128 MB range), that
branch plus a nop will be used by "tb_target_set_jmp_target".

Signed-off-by: Qi Hu <huqi@loongson.cn>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: WANG Xuerui <git@xen0n.name>
Message-Id: <20221015092754.91971-1-huqi@loongson.cn>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h     |  9 ++++--
 tcg/loongarch64/tcg-target.c.inc | 48 +++++++++++++++++++++++++++++---
 2 files changed, 50 insertions(+), 7 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@
 
 #define TCG_TARGET_INSN_UNIT_SIZE 4
 #define TCG_TARGET_NB_REGS 32
-#define MAX_CODE_GEN_BUFFER_SIZE  SIZE_MAX
+/*
+ * PCADDU18I + JIRL sequence can give 20 + 16 + 2 = 38 bits
+ * signed offset, which is +/- 128 GiB.
+ */
+#define MAX_CODE_GEN_BUFFER_SIZE  (128 * GiB)
 
 typedef enum {
     TCG_REG_ZERO,
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_clz_i32          1
 #define TCG_TARGET_HAS_ctz_i32          1
 #define TCG_TARGET_HAS_ctpop_i32        0
-#define TCG_TARGET_HAS_direct_jump      0
+#define TCG_TARGET_HAS_direct_jump      1
 #define TCG_TARGET_HAS_brcond2          0
 #define TCG_TARGET_HAS_setcond2         0
 #define TCG_TARGET_HAS_qemu_st8_i32     0
@@ -XXX,XX +XXX,XX @@ typedef enum {
 #define TCG_TARGET_HAS_muluh_i64        1
 #define TCG_TARGET_HAS_mulsh_i64        1
 
-/* not defined -- call should be eliminated at compile time */
 void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
 
 #define TCG_TARGET_DEFAULT_MO (0)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
 #endif
 }
 
+/* LoongArch uses `andi zero, zero, 0` as NOP. */
+#define NOP OPC_ANDI
+static void tcg_out_nop(TCGContext *s)
+{
+    tcg_out32(s, NOP);
+}
+
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+                              uintptr_t jmp_rw, uintptr_t addr)
+{
+    tcg_insn_unit i1, i2;
+    ptrdiff_t upper, lower;
+    ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2;
+
+    if (offset == sextreg(offset, 0, 26)) {
+        i1 = encode_sd10k16_insn(OPC_B, offset);
+        i2 = NOP;
+    } else {
+        tcg_debug_assert(offset == sextreg(offset, 0, 36));
+        lower = (int16_t)offset;
+        upper = (offset - lower) >> 16;
+
+        i1 = encode_dsj20_insn(OPC_PCADDU18I, TCG_REG_TMP0, upper);
+        i2 = encode_djsk16_insn(OPC_JIRL, TCG_REG_ZERO, TCG_REG_TMP0, lower);
+    }
+    uint64_t pair = ((uint64_t)i2 << 32) | i1;
+    qatomic_set((uint64_t *)jmp_rw, pair);
+    flush_idcache_range(jmp_rx, jmp_rw, 8);
+}
+
 /*
  * Entry-points
  */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_goto_tb:
-        assert(s->tb_jmp_insn_offset == 0);
-        /* indirect jump method */
-        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
-                   (uintptr_t)(s->tb_jmp_target_addr + a0));
+        tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
+        /*
+         * Ensure that patch area is 8-byte aligned so that an
+         * atomic write can be used to patch the target address.
+         */
+        if ((uintptr_t)s->code_ptr & 7) {
+            tcg_out_nop(s);
+        }
+        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        /*
+         * actual branch destination will be patched by
+         * tb_target_set_jmp_target later
+         */
+        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
         tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
         set_jmp_reset_offset(s, a0);
         break;
-- 
2.34.1

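As a quick check of the ranges involved (a sketch derived from the encodings used above, not text from the patch): "B" takes a 26-bit signed immediate counted in 4-byte units, while the PCADDU18I+JIRL pair composes a 20-bit high part with a 16-bit low part, again in 4-byte units:

    B:              2^(26-1) units * 4 bytes           = +/- 128 MiB
    PCADDU18I+JIRL: 20 + 16 + 2 = 38-bit byte offset   = +/- 2^37 = +/- 128 GiB

This is why tb_target_set_jmp_target() accepts sextreg(offset, 0, 26) for the short form and asserts sextreg(offset, 0, 36) (36 bits in instruction units, 38 in bytes) for the long form.
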
From: Qi Hu <huqi@loongson.cn>

AArch64 defines TCG_TARGET_HAS_direct_jump, so the "else" block is
dead in the "INDEX_op_goto_tb" case of "tcg_out_op".  Add an
assertion and delete this code for clarity.

Suggested-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Qi Hu <huqi@loongson.cn>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20221017020826.990729-1-huqi@loongson.cn>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_insn_offset != NULL) {
-            /* TCG_TARGET_HAS_direct_jump */
-            /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
-               write can be used to patch the target address. */
-            if ((uintptr_t)s->code_ptr & 7) {
-                tcg_out32(s, NOP);
-            }
-            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
-            /* actual branch destination will be patched by
-               tb_target_set_jmp_target later. */
-            tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
-            tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
-        } else {
-            /* !TCG_TARGET_HAS_direct_jump */
-            tcg_debug_assert(s->tb_jmp_target_addr != NULL);
-            intptr_t offset = tcg_pcrel_diff(s, (s->tb_jmp_target_addr + a0)) >> 2;
-            tcg_out_insn(s, 3305, LDR, offset, TCG_REG_TMP);
-        }
+        tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
+        /*
+         * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
+         * write can be used to patch the target address.
+         */
+        if ((uintptr_t)s->code_ptr & 7) {
+            tcg_out32(s, NOP);
+        }
+        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
+        /*
+         * actual branch destination will be patched by
+         * tb_target_set_jmp_target later
+         */
+        tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
+        tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
         tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
         set_jmp_reset_offset(s, a0);
         break;
-- 
2.34.1

From: Leandro Lupori <leandro.lupori@eldorado.org.br>

Profiling QEMU during Fedora 35 for PPC64 boot revealed that a
considerable amount of time was being spent in
check_for_breakpoints() (0.61% of total time on PPC64 and 2.19% on
amd64), even though it was just checking that its queue was empty
and returning, when no breakpoints were set.  It turns out this
function is not inlined by the compiler and it's always called by
helper_lookup_tb_ptr(), one of the most called functions.

By leaving only the check for empty queue in
check_for_breakpoints() and moving the remaining code to
check_for_breakpoints_slow(), called only when the queue is not
empty, it's possible to avoid the call overhead.  An improvement of
about 3% in total time was measured on POWER9.

Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20221025202424.195984-2-leandro.lupori@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
     }
 }
 
-static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
-                                  uint32_t *cflags)
+static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
+                                       uint32_t *cflags)
 {
     CPUBreakpoint *bp;
     bool match_page = false;
 
-    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
-        return false;
-    }
-
     /*
      * Singlestep overrides breakpoints.
      * This requirement is visible in the record-replay tests, where
@@ -XXX,XX +XXX,XX @@ static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
     return false;
 }
 
+static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+                                         uint32_t *cflags)
+{
+    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
+        check_for_breakpoints_slow(cpu, pc, cflags);
+}
+
 /**
  * helper_lookup_tb_ptr: quick check for next tb
  * @env: current cpu state
-- 
2.34.1

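The shape of the change is the usual fast-path/slow-path split; as a minimal generic sketch (illustrative names, not QEMU code):

    struct worklist { struct node *head; };       /* hypothetical type */
    bool worklist_run_slow(struct worklist *wl);  /* out-of-line body */

    /* Hot path: small enough to inline everywhere, so callers pay for
     * one load and a well-predicted branch instead of a function call. */
    static inline bool worklist_run(struct worklist *wl)
    {
        return __builtin_expect(wl->head != NULL, 0) && worklist_run_slow(wl);
    }
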
This differs from assert, in that with optimization enabled it
triggers at build-time.  It differs from QEMU_BUILD_BUG_ON,
aka _Static_assert, in that it is sensitive to control flow
and is subject to dead-code elimination.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/osdep.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -XXX,XX +XXX,XX @@ void QEMU_ERROR("code path is reachable")
 #define qemu_build_not_reached()  g_assert_not_reached()
 #endif
 
+/**
+ * qemu_build_assert()
+ *
+ * The compiler, during optimization, is expected to prove that the
+ * assertion is true.
+ */
+#define qemu_build_assert(test) while (!(test)) qemu_build_not_reached()
+
 /*
  * According to waitpid man page:
  * WCOREDUMP
-- 
2.34.1

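For illustration, a minimal sketch of the semantics (not part of the patch):

    #include "qemu/osdep.h"

    int probe(int x)
    {
        /* Provably true: the while loop the macro expands to is deleted. */
        qemu_build_assert(sizeof(int) >= 2);

        if (0) {
            /* False, but dead code: eliminated together with the branch,
             * so the build still succeeds.  QEMU_BUILD_BUG_ON, being
             * _Static_assert, would reject this regardless of reachability. */
            qemu_build_assert(sizeof(int) == 1);
        }
        return x;
    }
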
Change from QEMU_BUILD_BUG_ON, which requires ifdefs to avoid
problematic code, to qemu_build_assert, which can use C ifs.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/atomic.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -XXX,XX +XXX,XX @@
 
 #define qatomic_read(ptr)                                 \
     ({                                                    \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     qatomic_read__nocheck(ptr);                           \
     })
 
@@ -XXX,XX +XXX,XX @@
     __atomic_store_n(ptr, i, __ATOMIC_RELAXED)
 
 #define qatomic_set(ptr, i)  do {                         \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     qatomic_set__nocheck(ptr, i);                         \
 } while(0)
 
@@ -XXX,XX +XXX,XX @@
 
 #define qatomic_rcu_read(ptr)                             \
     ({                                                    \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     typeof_strip_qual(*ptr) _val;                         \
     qatomic_rcu_read__nocheck(ptr, &_val);                \
     _val;                                                 \
     })
 
 #define qatomic_rcu_set(ptr, i) do {                      \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     __atomic_store_n(ptr, i, __ATOMIC_RELEASE);           \
 } while(0)
 
 #define qatomic_load_acquire(ptr)                         \
     ({                                                    \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     typeof_strip_qual(*ptr) _val;                         \
     __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);          \
     _val;                                                 \
     })
 
 #define qatomic_store_release(ptr, i)  do {               \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     __atomic_store_n(ptr, i, __ATOMIC_RELEASE);           \
 } while(0)
 
@@ -XXX,XX +XXX,XX @@
     })
 
 #define qatomic_xchg(ptr, i)    ({                        \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     qatomic_xchg__nocheck(ptr, i);                        \
     })
 
@@ -XXX,XX +XXX,XX @@
     })
 
 #define qatomic_cmpxchg(ptr, old, new)    ({              \
-    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
+    qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE);   \
     qatomic_cmpxchg__nocheck(ptr, old, new);              \
     })
 
-- 
2.34.1

Use qatomic_*, which expands to __atomic_* in preference
to the "legacy" __sync_* functions.

Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/qemu/thread.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -XXX,XX +XXX,XX @@ struct QemuSpin {
 
 static inline void qemu_spin_init(QemuSpin *spin)
 {
-    __sync_lock_release(&spin->value);
+    qatomic_set(&spin->value, 0);
 #ifdef CONFIG_TSAN
     __tsan_mutex_create(spin, __tsan_mutex_not_static);
 #endif
@@ -XXX,XX +XXX,XX @@ static inline void qemu_spin_lock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_lock(spin, 0);
 #endif
-    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
+    while (unlikely(qatomic_xchg(&spin->value, 1))) {
         while (qatomic_read(&spin->value)) {
             cpu_relax();
         }
@@ -XXX,XX +XXX,XX @@ static inline bool qemu_spin_trylock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
 #endif
-    bool busy = __sync_lock_test_and_set(&spin->value, true);
+    bool busy = qatomic_xchg(&spin->value, true);
 #ifdef CONFIG_TSAN
     unsigned flags = __tsan_mutex_try_lock;
     flags |= busy ? __tsan_mutex_try_lock_failed : 0;
@@ -XXX,XX +XXX,XX @@ static inline void qemu_spin_unlock(QemuSpin *spin)
 #ifdef CONFIG_TSAN
     __tsan_mutex_pre_unlock(spin, 0);
 #endif
-    __sync_lock_release(&spin->value);
+    qatomic_store_release(&spin->value, 0);
 #ifdef CONFIG_TSAN
     __tsan_mutex_post_unlock(spin, 0);
 #endif
-- 
2.34.1

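For reference, a summary of the substitution and the memory-order strength involved (a sketch, not text from the patch):

    /*
     *   __sync_lock_test_and_set(p, 1) -> qatomic_xchg(p, 1)
     *       (acquire barrier only)        (seq-cst exchange: strictly stronger)
     *   __sync_lock_release(p)         -> qatomic_store_release(p, 0)
     *       (release barrier)             (release store: equivalent)
     *   init via __sync_lock_release   -> qatomic_set(p, 0)
     *                                     (relaxed store: no ordering needed
     *                                      before the lock is shared)
     */
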
Use a constant target data allocation size for all pages.
This will be necessary to reduce overhead of page tracking.
Since TARGET_PAGE_DATA_SIZE is now required, we can use this
to omit data tracking for targets that don't require it.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h    | 9 ++++-----
 target/arm/cpu.h          | 8 ++++++++
 target/arm/internals.h    | 4 ----
 accel/tcg/translate-all.c | 8 ++++++--
 target/arm/mte_helper.c   | 3 +--
 5 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ void page_reset_target_data(target_ulong start, target_ulong end);
 int page_check_range(target_ulong start, target_ulong len, int flags);
 
 /**
- * page_alloc_target_data(address, size)
+ * page_alloc_target_data(address)
  * @address: guest virtual address
- * @size: size of data to allocate
  *
- * Allocate @size bytes of out-of-band data to associate with the
- * guest page at @address.  If the page is not mapped, NULL will
+ * Allocate TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address.  If the page is not mapped, NULL will
  * be returned.  If there is existing data associated with @address,
  * no new memory will be allocated.
  *
  * The memory will be freed when the guest page is deallocated,
  * e.g. with the munmap system call.
  */
-void *page_alloc_target_data(target_ulong address, size_t size);
+void *page_alloc_target_data(target_ulong address);
 
 /**
  * page_get_target_data(address)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ extern const uint64_t pred_esz_masks[5];
 #define PAGE_MTE  PAGE_TARGET_2
 #define PAGE_TARGET_STICKY  PAGE_MTE
 
+/* We associate one allocation tag per 16 bytes, the minimum. */
+#define LOG2_TAG_GRANULE 4
+#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
+
+#ifdef CONFIG_USER_ONLY
+#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
+#endif
+
 #ifdef TARGET_TAGGED_ADDRESSES
 /**
  * cpu_untagged_addr:
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ void arm_log_exception(CPUState *cs);
  */
 #define GMID_EL1_BS  6
 
-/* We associate one allocation tag per 16 bytes, the minimum. */
-#define LOG2_TAG_GRANULE 4
-#define TAG_GRANULE      (1 << LOG2_TAG_GRANULE)
-
 /*
  * SVE predicates are 1/8 the size of SVE vectors, and cannot use
  * the same simd_desc() encoding due to restrictions on size.
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
 
 void page_reset_target_data(target_ulong start, target_ulong end)
 {
+#ifdef TARGET_PAGE_DATA_SIZE
     target_ulong addr, len;
 
     /*
@@ -XXX,XX +XXX,XX @@ void page_reset_target_data(target_ulong start, target_ulong end)
         g_free(p->target_data);
         p->target_data = NULL;
     }
+#endif
 }
 
+#ifdef TARGET_PAGE_DATA_SIZE
 void *page_get_target_data(target_ulong address)
 {
     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
     return p ? p->target_data : NULL;
 }
 
-void *page_alloc_target_data(target_ulong address, size_t size)
+void *page_alloc_target_data(target_ulong address)
 {
     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
     void *ret = NULL;
@@ -XXX,XX +XXX,XX @@ void *page_alloc_target_data(target_ulong address)
     if (p->flags & PAGE_VALID) {
         ret = p->target_data;
         if (!ret) {
-            p->target_data = ret = g_malloc0(size);
+            p->target_data = ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
         }
     }
     return ret;
 }
+#endif /* TARGET_PAGE_DATA_SIZE */
 
 int page_check_range(target_ulong start, target_ulong len, int flags)
 {
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
 
     tags = page_get_target_data(clean_ptr);
     if (tags == NULL) {
-        size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
-        tags = page_alloc_target_data(clean_ptr, alloc_size);
+        tags = page_alloc_target_data(clean_ptr);
         assert(tags != NULL);
     }
 
-- 
2.34.1

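As a worked example of the constant (assuming the usual 4 KiB target page, derived from the macros above):

    4096 bytes / 16-byte TAG_GRANULE     = 256 allocation tags per page
    256 tags * 4 bits (2 tags per byte)  = 128 bytes
    TARGET_PAGE_SIZE >> (4 + 1)          = 4096 >> 5 = 128 bytes

so TARGET_PAGE_DATA_SIZE is a compile-time constant per target, which is what lets page_alloc_target_data() drop its size parameter.
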
1
In order to handle TB's that translate to too much code, we
1
These items printf, and could be replaced with proper
2
need to place the control of the length of the translation
2
tracepoints if we really cared.
3
in the hands of the code gen master loop.
4
3
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
include/exec/exec-all.h | 4 ++--
7
accel/tcg/translate-all.c | 109 --------------------------------------
10
include/exec/translator.h | 3 ++-
8
1 file changed, 109 deletions(-)
11
accel/tcg/translate-all.c | 15 +++++++++++++--
12
accel/tcg/translator.c | 15 ++-------------
13
target/alpha/translate.c | 4 ++--
14
target/arm/translate.c | 4 ++--
15
target/cris/translate.c | 10 +---------
16
target/hppa/translate.c | 5 ++---
17
target/i386/translate.c | 4 ++--
18
target/lm32/translate.c | 10 +---------
19
target/m68k/translate.c | 4 ++--
20
target/microblaze/translate.c | 10 +---------
21
target/mips/translate.c | 4 ++--
22
target/moxie/translate.c | 11 ++---------
23
target/nios2/translate.c | 14 ++------------
24
target/openrisc/translate.c | 4 ++--
25
target/ppc/translate.c | 4 ++--
26
target/riscv/translate.c | 4 ++--
27
target/s390x/translate.c | 4 ++--
28
target/sh4/translate.c | 4 ++--
29
target/sparc/translate.c | 4 ++--
30
target/tilegx/translate.c | 12 +-----------
31
target/tricore/translate.c | 16 ++--------------
32
target/unicore32/translate.c | 10 +---------
33
target/xtensa/translate.c | 4 ++--
34
25 files changed, 56 insertions(+), 127 deletions(-)
35
9
36
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/include/exec/exec-all.h
39
+++ b/include/exec/exec-all.h
40
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
41
42
#include "qemu/log.h"
43
44
-void gen_intermediate_code(CPUState *cpu, struct TranslationBlock *tb);
45
-void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
46
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
47
+void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
48
target_ulong *data);
49
50
void cpu_gen_init(void);
51
diff --git a/include/exec/translator.h b/include/exec/translator.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/include/exec/translator.h
54
+++ b/include/exec/translator.h
55
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
56
* @db: Disassembly context.
57
* @cpu: Target vCPU.
58
* @tb: Translation block.
59
+ * @max_insns: Maximum number of insns to translate.
60
*
61
* Generic translator loop.
62
*
63
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
64
* - When too many instructions have been translated.
65
*/
66
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
67
- CPUState *cpu, TranslationBlock *tb);
68
+ CPUState *cpu, TranslationBlock *tb, int max_insns);
69
70
void translator_loop_temp_check(DisasContextBase *db);
71
72
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
10
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
73
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
74
--- a/accel/tcg/translate-all.c
12
--- a/accel/tcg/translate-all.c
75
+++ b/accel/tcg/translate-all.c
13
+++ b/accel/tcg/translate-all.c
76
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
14
@@ -XXX,XX +XXX,XX @@
77
tb_page_addr_t phys_pc, phys_page2;
15
#include "tb-context.h"
78
target_ulong virt_page2;
16
#include "internal.h"
79
tcg_insn_unit *gen_code_buf;
17
80
- int gen_code_size, search_size;
18
-/* #define DEBUG_TB_INVALIDATE */
81
+ int gen_code_size, search_size, max_insns;
19
-/* #define DEBUG_TB_FLUSH */
82
#ifdef CONFIG_PROFILER
20
/* make various TB consistency checks */
83
TCGProfile *prof = &tcg_ctx->prof;
21
-/* #define DEBUG_TB_CHECK */
84
int64_t ti;
22
-
85
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
23
-#ifdef DEBUG_TB_INVALIDATE
86
cflags &= ~CF_CLUSTER_MASK;
24
-#define DEBUG_TB_INVALIDATE_GATE 1
87
cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
25
-#else
88
26
-#define DEBUG_TB_INVALIDATE_GATE 0
89
+ max_insns = cflags & CF_COUNT_MASK;
27
-#endif
90
+ if (max_insns == 0) {
28
-
91
+ max_insns = CF_COUNT_MASK;
29
-#ifdef DEBUG_TB_FLUSH
92
+ }
30
-#define DEBUG_TB_FLUSH_GATE 1
93
+ if (max_insns > TCG_MAX_INSNS) {
31
-#else
94
+ max_insns = TCG_MAX_INSNS;
32
-#define DEBUG_TB_FLUSH_GATE 0
95
+ }
33
-#endif
96
+ if (cpu->singlestep_enabled || singlestep) {
34
-
97
+ max_insns = 1;
35
-#if !defined(CONFIG_USER_ONLY)
98
+ }
36
-/* TB consistency checks only implemented for usermode emulation. */
99
+
37
-#undef DEBUG_TB_CHECK
100
buffer_overflow:
38
-#endif
101
tb = tb_alloc(pc);
39
-
102
if (unlikely(!tb)) {
40
-#ifdef DEBUG_TB_CHECK
103
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
41
-#define DEBUG_TB_CHECK_GATE 1
104
tcg_func_start(tcg_ctx);
42
-#else
105
43
-#define DEBUG_TB_CHECK_GATE 0
106
tcg_ctx->cpu = ENV_GET_CPU(env);
44
-#endif
107
- gen_intermediate_code(cpu, tb);
45
108
+ gen_intermediate_code(cpu, tb, max_insns);
46
/* Access to the various translations structures need to be serialised via locks
109
tcg_ctx->cpu = NULL;
47
* for consistency.
110
48
@@ -XXX,XX +XXX,XX @@ static void page_flush_tb(void)
111
trace_translate_block(tb, tb->pc, tb->tc.ptr);
49
}
112
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
113
index XXXXXXX..XXXXXXX 100644
114
--- a/accel/tcg/translator.c
115
+++ b/accel/tcg/translator.c
116
@@ -XXX,XX +XXX,XX @@ void translator_loop_temp_check(DisasContextBase *db)
117
}
50
}
118
51
119
void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
52
-static gboolean tb_host_size_iter(gpointer key, gpointer value, gpointer data)
120
- CPUState *cpu, TranslationBlock *tb)
53
-{
121
+ CPUState *cpu, TranslationBlock *tb, int max_insns)
54
- const TranslationBlock *tb = value;
55
- size_t *size = data;
56
-
57
- *size += tb->tc.size;
58
- return false;
59
-}
60
-
61
/* flush all the translation blocks */
62
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
122
{
63
{
123
int bp_insn = 0;
64
@@ -XXX,XX +XXX,XX @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
124
65
}
125
@@ -XXX,XX +XXX,XX @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
66
did_flush = true;
126
db->pc_next = db->pc_first;
67
127
db->is_jmp = DISAS_NEXT;
68
- if (DEBUG_TB_FLUSH_GATE) {
128
db->num_insns = 0;
69
- size_t nb_tbs = tcg_nb_tbs();
129
+ db->max_insns = max_insns;
70
- size_t host_size = 0;
130
db->singlestep_enabled = cpu->singlestep_enabled;
71
-
131
72
- tcg_tb_foreach(tb_host_size_iter, &host_size);
132
- /* Instruction counting */
73
- printf("qemu: flush code_size=%zu nb_tbs=%zu avg_tb_size=%zu\n",
133
- db->max_insns = tb_cflags(db->tb) & CF_COUNT_MASK;
74
- tcg_code_size(), nb_tbs, nb_tbs > 0 ? host_size / nb_tbs : 0);
134
- if (db->max_insns == 0) {
135
- db->max_insns = CF_COUNT_MASK;
136
- }
137
- if (db->max_insns > TCG_MAX_INSNS) {
138
- db->max_insns = TCG_MAX_INSNS;
139
- }
140
- if (db->singlestep_enabled || singlestep) {
141
- db->max_insns = 1;
142
- }
75
- }
143
-
76
-
144
ops->init_disas_context(db, cpu);
77
CPU_FOREACH(cpu) {
145
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
78
tcg_flush_jmp_cache(cpu);
146
79
}
147
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
80
@@ -XXX,XX +XXX,XX @@ void tb_flush(CPUState *cpu)
148
index XXXXXXX..XXXXXXX 100644
81
}
149
--- a/target/alpha/translate.c
150
+++ b/target/alpha/translate.c
151
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
152
.disas_log = alpha_tr_disas_log,
153
};
154
155
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
156
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
157
{
158
DisasContext dc;
159
- translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
160
+ translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
161
}
82
}
162
83
163
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
84
-/*
164
diff --git a/target/arm/translate.c b/target/arm/translate.c
85
- * Formerly ifdef DEBUG_TB_CHECK. These debug functions are user-mode-only,
165
index XXXXXXX..XXXXXXX 100644
86
- * so in order to prevent bit rot we compile them unconditionally in user-mode,
166
--- a/target/arm/translate.c
87
- * and let the optimizer get rid of them by wrapping their user-only callers
167
+++ b/target/arm/translate.c
88
- * with if (DEBUG_TB_CHECK_GATE).
168
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
89
- */
169
};
90
-#ifdef CONFIG_USER_ONLY
170
91
-
171
/* generate intermediate code for basic block 'tb'. */
92
-static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
172
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
93
-{
173
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
94
- TranslationBlock *tb = p;
174
{
95
- target_ulong addr = *(target_ulong *)userp;
175
DisasContext dc;
96
-
176
const TranslatorOps *ops = &arm_translator_ops;
97
- if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) ||
177
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
98
- addr >= tb_pc(tb) + tb->size)) {
99
- printf("ERROR invalidate: address=" TARGET_FMT_lx
100
- " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size);
101
- }
102
-}
103
-
104
-/* verify that all the pages have correct rights for code
105
- *
106
- * Called with mmap_lock held.
107
- */
108
-static void tb_invalidate_check(target_ulong address)
109
-{
110
- address &= TARGET_PAGE_MASK;
111
- qht_iter(&tb_ctx.htable, do_tb_invalidate_check, &address);
112
-}
113
-
114
-static void do_tb_page_check(void *p, uint32_t hash, void *userp)
115
-{
116
- TranslationBlock *tb = p;
117
- int flags1, flags2;
118
-
119
- flags1 = page_get_flags(tb_pc(tb));
120
- flags2 = page_get_flags(tb_pc(tb) + tb->size - 1);
121
- if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
122
- printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
123
- (long)tb_pc(tb), tb->size, flags1, flags2);
124
- }
125
-}
126
-
127
-/* verify that all the pages have correct rights for code */
128
-static void tb_page_check(void)
129
-{
130
- qht_iter(&tb_ctx.htable, do_tb_page_check, NULL);
131
-}
132
-
133
-#endif /* CONFIG_USER_ONLY */
134
-
135
/*
136
* user-mode: call with mmap_lock held
137
* !user-mode: call with @pd->lock held
138
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
139
page_unlock(p2);
178
}
140
}
179
#endif
141
page_unlock(p);
180
142
-
181
- translator_loop(ops, &dc.base, cpu, tb);
143
-#ifdef CONFIG_USER_ONLY
182
+ translator_loop(ops, &dc.base, cpu, tb, max_insns);
144
- if (DEBUG_TB_CHECK_GATE) {
145
- tb_page_check();
146
- }
147
-#endif
148
return tb;
183
}
149
}
184
150
185
void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
151
@@ -XXX,XX +XXX,XX @@ void page_protect(tb_page_addr_t page_addr)
186
diff --git a/target/cris/translate.c b/target/cris/translate.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/cris/translate.c
189
+++ b/target/cris/translate.c
190
@@ -XXX,XX +XXX,XX @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
191
*/
192
193
/* generate intermediate code for basic block 'tb'. */
194
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
195
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
196
{
197
CPUCRISState *env = cs->env_ptr;
198
uint32_t pc_start;
199
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
200
uint32_t page_start;
201
target_ulong npc;
202
int num_insns;
203
- int max_insns;
204
205
if (env->pregs[PR_VR] == 32) {
206
dc->decoder = crisv32_decoder;
207
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
208
209
page_start = pc_start & TARGET_PAGE_MASK;
210
num_insns = 0;
211
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
212
- if (max_insns == 0) {
213
- max_insns = CF_COUNT_MASK;
214
- }
215
- if (max_insns > TCG_MAX_INSNS) {
216
- max_insns = TCG_MAX_INSNS;
217
- }
218
219
gen_tb_start(tb);
220
do {
221
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
222
index XXXXXXX..XXXXXXX 100644
223
--- a/target/hppa/translate.c
224
+++ b/target/hppa/translate.c
225
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
226
.disas_log = hppa_tr_disas_log,
227
};
228
229
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
230
-
231
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
232
{
233
DisasContext ctx;
234
- translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
235
+ translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
236
}
237
238
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
239
diff --git a/target/i386/translate.c b/target/i386/translate.c
240
index XXXXXXX..XXXXXXX 100644
241
--- a/target/i386/translate.c
242
+++ b/target/i386/translate.c
243
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
244
};
245
246
/* generate intermediate code for basic block 'tb'. */
247
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
248
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
249
{
250
DisasContext dc;
251
252
- translator_loop(&i386_tr_ops, &dc.base, cpu, tb);
253
+ translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
254
}
255
256
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
257
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
258
index XXXXXXX..XXXXXXX 100644
259
--- a/target/lm32/translate.c
260
+++ b/target/lm32/translate.c
261
@@ -XXX,XX +XXX,XX @@ static inline void decode(DisasContext *dc, uint32_t ir)
262
}
263
264
/* generate intermediate code for basic block 'tb'. */
265
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
266
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
267
{
268
CPULM32State *env = cs->env_ptr;
269
LM32CPU *cpu = lm32_env_get_cpu(env);
270
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
271
uint32_t pc_start;
272
uint32_t page_start;
273
int num_insns;
274
- int max_insns;
275
276
pc_start = tb->pc;
277
dc->features = cpu->features;
278
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
279
280
page_start = pc_start & TARGET_PAGE_MASK;
281
num_insns = 0;
282
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
283
- if (max_insns == 0) {
284
- max_insns = CF_COUNT_MASK;
285
- }
286
- if (max_insns > TCG_MAX_INSNS) {
287
- max_insns = TCG_MAX_INSNS;
288
- }
289
290
gen_tb_start(tb);
291
do {
292
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
293
index XXXXXXX..XXXXXXX 100644
294
--- a/target/m68k/translate.c
295
+++ b/target/m68k/translate.c
296
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
297
.disas_log = m68k_tr_disas_log,
298
};
299
300
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
301
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
302
{
303
DisasContext dc;
304
- translator_loop(&m68k_tr_ops, &dc.base, cpu, tb);
305
+ translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
306
}
307
308
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
309
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
310
index XXXXXXX..XXXXXXX 100644
311
--- a/target/microblaze/translate.c
312
+++ b/target/microblaze/translate.c
313
@@ -XXX,XX +XXX,XX @@ static inline void decode(DisasContext *dc, uint32_t ir)
314
}
315
316
/* generate intermediate code for basic block 'tb'. */
317
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
318
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
319
{
320
CPUMBState *env = cs->env_ptr;
321
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
322
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
323
uint32_t page_start, org_flags;
324
uint32_t npc;
325
int num_insns;
326
- int max_insns;
327
328
pc_start = tb->pc;
329
dc->cpu = cpu;
330
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
331
332
page_start = pc_start & TARGET_PAGE_MASK;
333
num_insns = 0;
334
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
335
- if (max_insns == 0) {
336
- max_insns = CF_COUNT_MASK;
337
- }
338
- if (max_insns > TCG_MAX_INSNS) {
339
- max_insns = TCG_MAX_INSNS;
340
- }
341
342
gen_tb_start(tb);
343
do
344
diff --git a/target/mips/translate.c b/target/mips/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.disas_log = mips_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext ctx;

- translator_loop(&mips_tr_ops, &ctx.base, cs, tb);
+ translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
}

static void fpu_dump_state(CPUMIPSState *env, FILE *f, int flags)
diff --git a/target/moxie/translate.c b/target/moxie/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/moxie/translate.c
+++ b/target/moxie/translate.c
@@ -XXX,XX +XXX,XX @@ static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
}

/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
CPUMoxieState *env = cs->env_ptr;
MoxieCPU *cpu = moxie_env_get_cpu(env);
DisasContext ctx;
target_ulong pc_start;
- int num_insns, max_insns;
+ int num_insns;

pc_start = tb->pc;
ctx.pc = pc_start;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
ctx.singlestep_enabled = 0;
ctx.bstate = BS_NONE;
num_insns = 0;
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }

gen_tb_start(tb);
do {
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -XXX,XX +XXX,XX @@ static void gen_exception(DisasContext *dc, uint32_t excp)
}

/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
CPUNios2State *env = cs->env_ptr;
DisasContext dc1, *dc = &dc1;
int num_insns;
- int max_insns;

/* Initialize DC */
dc->cpu_env = cpu_env;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)

/* Set up instruction counts */
num_insns = 0;
- if (cs->singlestep_enabled || singlestep) {
- max_insns = 1;
- } else {
+ if (max_insns > 1) {
int page_insns = (TARGET_PAGE_SIZE - (tb->pc & TARGET_PAGE_MASK)) / 4;
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
if (max_insns > page_insns) {
max_insns = page_insns;
}
}
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }

gen_tb_start(tb);
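The nios2 hunk above keeps its page-bounding logic while deleting the generic cflags clamping. As a minimal, self-contained sketch (not part of the series; the 4 KiB page size and the mask definition are assumptions, with TARGET_PAGE_MASK taken as ~(TARGET_PAGE_SIZE - 1) so that pc & ~TARGET_PAGE_MASK is the offset within the page):

#include <stdint.h>

#define TARGET_PAGE_SIZE 4096
#define TARGET_PAGE_MASK (~(uint64_t)(TARGET_PAGE_SIZE - 1))

/* Clamp an instruction budget so a fixed-width (4-byte) instruction
 * stream cannot run past the end of the page containing pc, mirroring
 * the page_insns computation in the nios2 hunk above. */
static inline int bound_insns_to_page(uint64_t pc, int max_insns)
{
    int page_insns = (int)((TARGET_PAGE_SIZE - (pc & ~TARGET_PAGE_MASK)) / 4);
    return max_insns < page_insns ? max_insns : page_insns;
}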
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.disas_log = openrisc_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext ctx;

- translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb);
+ translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.disas_log = ppc_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext ctx;

- translator_loop(&ppc_tr_ops, &ctx.base, cs, tb);
+ translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.disas_log = riscv_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext ctx;

- translator_loop(&riscv_tr_ops, &ctx.base, cs, tb);
+ translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
}

void riscv_translate_init(void)
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext dc;

- translator_loop(&s390x_tr_ops, &dc.base, cs, tb);
+ translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.disas_log = sh4_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext ctx;

- translator_loop(&sh4_tr_ops, &ctx.base, cs, tb);
+ translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.disas_log = sparc_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
DisasContext dc = {};

- translator_loop(&sparc_tr_ops, &dc.base, cs, tb);
+ translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
}

void sparc_tcg_init(void)
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tilegx/translate.c
+++ b/target/tilegx/translate.c
@@ -XXX,XX +XXX,XX @@ static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
}
}

-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
CPUTLGState *env = cs->env_ptr;
DisasContext ctx;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
uint64_t pc_start = tb->pc;
uint64_t page_start = pc_start & TARGET_PAGE_MASK;
int num_insns = 0;
- int max_insns = tb_cflags(tb) & CF_COUNT_MASK;

dc->pc = pc_start;
dc->mmuidx = 0;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
qemu_log_lock();
qemu_log("IN: %s\n", lookup_symbol(pc_start));
}
- if (!max_insns) {
- max_insns = CF_COUNT_MASK;
- }
- if (cs->singlestep_enabled || singlestep) {
- max_insns = 1;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }
gen_tb_start(tb);

while (1) {
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
}
}

-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
CPUTriCoreState *env = cs->env_ptr;
DisasContext ctx;
target_ulong pc_start;
- int num_insns, max_insns;
-
- num_insns = 0;
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (singlestep) {
- max_insns = 1;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }
+ int num_insns = 0;

pc_start = tb->pc;
ctx.pc = pc_start;
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/unicore32/translate.c
+++ b/target/unicore32/translate.c
@@ -XXX,XX +XXX,XX @@ static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
}

/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
CPUUniCore32State *env = cs->env_ptr;
DisasContext dc1, *dc = &dc1;
target_ulong pc_start;
uint32_t page_start;
int num_insns;
- int max_insns;

/* generate intermediate code */
num_temps = 0;
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
cpu_F1d = tcg_temp_new_i64();
page_start = pc_start & TARGET_PAGE_MASK;
num_insns = 0;
- max_insns = tb_cflags(tb) & CF_COUNT_MASK;
- if (max_insns == 0) {
- max_insns = CF_COUNT_MASK;
- }
- if (max_insns > TCG_MAX_INSNS) {
- max_insns = TCG_MAX_INSNS;
- }

#ifndef CONFIG_USER_ONLY
if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.disas_log = xtensa_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
DisasContext dc = {};
- translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb);
+ translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
}

void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
--
2.17.1
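Every hunk in the patch above deletes a copy of the same computation, which the series hoists into tb_gen_code. As a minimal sketch of that now-shared logic (not part of the patch; the two macro values below are stand-ins for QEMU's real definitions):

/* Stand-in values; in QEMU these come from exec/exec-all.h and tcg/tcg.h. */
#define CF_COUNT_MASK 0x7fff
#define TCG_MAX_INSNS 512

/* The per-target clamping removed above, done once by the caller:
 * take the count from cflags, default it, cap it, and force a single
 * instruction when single-stepping. */
static int compute_max_insns(unsigned cflags, int singlestep)
{
    int max_insns = cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    if (singlestep) {
        max_insns = 1;
    }
    return max_insns;
}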
New patch

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/internal.h | 31 +++++++++++++++++++++++++++++++
accel/tcg/translate-all.c | 31 +------------------------------
2 files changed, 32 insertions(+), 30 deletions(-)

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@

#include "exec/exec-all.h"

+/*
+ * Access to the various translations structures need to be serialised
+ * via locks for consistency. In user-mode emulation access to the
+ * memory related structures are protected with mmap_lock.
+ * In !user-mode we use per-page locks.
+ */
+#ifdef CONFIG_SOFTMMU
+#define assert_memory_lock()
+#else
+#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
+#endif
+
+typedef struct PageDesc {
+ /* list of TBs intersecting this ram page */
+ uintptr_t first_tb;
+#ifdef CONFIG_USER_ONLY
+ unsigned long flags;
+ void *target_data;
+#endif
+#ifdef CONFIG_SOFTMMU
+ QemuSpin lock;
+#endif
+} PageDesc;
+
+PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);
+
+static inline PageDesc *page_find(tb_page_addr_t index)
+{
+ return page_find_alloc(index, false);
+}
+
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
int cflags);
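With page_find_alloc() made a public symbol within accel/tcg, a usage sketch under the locking rule stated in the moved comment (this is illustrative only and not part of the patch; mmap_lock()/mmap_unlock() are QEMU's existing user-mode lock):

/* Find-or-create a PageDesc while honouring the comment above:
 * in user-mode the page tables are protected by mmap_lock. */
static PageDesc *find_or_create_page(tb_page_addr_t index)
{
    PageDesc *pd;

    mmap_lock();                       /* no-op in softmmu builds */
    pd = page_find_alloc(index, true); /* alloc=true builds missing levels */
    mmap_unlock();
    return pd;
}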
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@

/* make various TB consistency checks */

-/* Access to the various translations structures need to be serialised via locks
- * for consistency.
- * In user-mode emulation access to the memory related structures are protected
- * with mmap_lock.
- * In !user-mode we use per-page locks.
- */
-#ifdef CONFIG_SOFTMMU
-#define assert_memory_lock()
-#else
-#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
-#endif
-
-typedef struct PageDesc {
- /* list of TBs intersecting this ram page */
- uintptr_t first_tb;
-#ifdef CONFIG_USER_ONLY
- unsigned long flags;
- void *target_data;
-#endif
-#ifdef CONFIG_SOFTMMU
- QemuSpin lock;
-#endif
-} PageDesc;
-
/**
* struct page_entry - page descriptor entry
* @pd: pointer to the &struct PageDesc of the page this entry represents
@@ -XXX,XX +XXX,XX @@ void page_init(void)
#endif
}

-static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
+PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
{
PageDesc *pd;
void **lp;
@@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
return pd + (index & (V_L2_SIZE - 1));
}

-static inline PageDesc *page_find(tb_page_addr_t index)
-{
- return page_find_alloc(index, false);
-}
-
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);

--
2.34.1
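As a usage sketch only (not part of the patch): with PageDesc and page_find() now visible through internal.h, other accel/tcg files can ask whether a page carries translated code. The TARGET_PAGE_BITS shift mirrors the page_find() callers elsewhere in this series:

/* A page has translated code iff its descriptor exists and its
 * first_tb list head is non-empty; page_find(..., alloc=false)
 * never creates a descriptor as a side effect. */
static inline bool page_has_tbs(tb_page_addr_t addr)
{
    PageDesc *pd = page_find(addr >> TARGET_PAGE_BITS);
    return pd != NULL && pd->first_tb != 0;
}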
New patch
This will not necessarily restrict the size of the TB, since for v7
the majority of constant pool usage is for calls from the out-of-line
ldst code, which is already at the end of the TB. But this does
allow us to save one insn per reference on the off-chance.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/arm/tcg-target.inc.c | 57 +++++++++++++++-------------------------
1 file changed, 21 insertions(+), 36 deletions(-)

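A minimal sketch (not part of the patch) of the addressing constraint behind this change: an ARM ldr immediate offset is a 12-bit magnitude plus an add/subtract bit, measured from PC+8, which is exactly the window that reloc_pc13 in the diff below tests:

#include <stdbool.h>
#include <stdint.h>

/* True iff target is reachable from a pc-relative ldr at insn_addr:
 * the hardware adds/subtracts a 12-bit immediate to PC+8. */
static bool in_ldr_imm12_range(intptr_t target, intptr_t insn_addr)
{
    intptr_t offset = target - (insn_addr + 8);
    return offset >= -0xfff && offset <= 0xfff;
}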
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
return false;
}

+static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
+{
+ ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;
+
+ if (offset >= -0xfff && offset <= 0xfff) {
+ tcg_insn_unit insn = *code_ptr;
+ bool u = (offset >= 0);
+ if (!u) {
+ offset = -offset;
+ }
+ insn = deposit32(insn, 23, 1, u);
+ insn = deposit32(insn, 0, 12, offset);
+ *code_ptr = insn;
+ return true;
+ }
+ return false;
+}
+

New patch

Move all of the TranslationBlock flushing and page linking
code from translate-all.c to tb-maint.c.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/internal.h | 55 +++
accel/tcg/tb-maint.c | 735 ++++++++++++++++++++++++++++++++++++
accel/tcg/translate-all.c | 766 +-------------------------------------
accel/tcg/meson.build | 1 +
4 files changed, 802 insertions(+), 755 deletions(-)
create mode 100644 accel/tcg/tb-maint.c

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@ typedef struct PageDesc {
#endif
} PageDesc;

+/* Size of the L2 (and L3, etc) page tables. */
+#define V_L2_BITS 10
+#define V_L2_SIZE (1 << V_L2_BITS)
+
+/*
+ * L1 Mapping properties
+ */
+extern int v_l1_size;
+extern int v_l1_shift;
+extern int v_l2_levels;
+
+/*
+ * The bottom level has pointers to PageDesc, and is indexed by
+ * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
+ */
+#define V_L1_MIN_BITS 4
+#define V_L1_MAX_BITS (V_L2_BITS + 3)
+#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
+
+extern void *l1_map[V_L1_MAX_SIZE];
+
PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);

static inline PageDesc *page_find(tb_page_addr_t index)
@@ -XXX,XX +XXX,XX @@ static inline PageDesc *page_find(tb_page_addr_t index)
return page_find_alloc(index, false);
}

+/* list iterators for lists of tagged pointers in TranslationBlock */
+#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
+ for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
+ tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
+ tb = (TranslationBlock *)((uintptr_t)tb & ~1))
+
+#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
+ TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
+
+#define TB_FOR_EACH_JMP(head_tb, tb, n) \
+ TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
+
+/* In user-mode page locks aren't used; mmap_lock is enough */
+#ifdef CONFIG_USER_ONLY
+#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
+static inline void page_lock(PageDesc *pd) { }
+static inline void page_unlock(PageDesc *pd) { }
+#else
+#ifdef CONFIG_DEBUG_TCG
+void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
+#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
+#else
+#define assert_page_locked(pd)
+#endif
+void page_lock(PageDesc *pd);
+void page_unlock(PageDesc *pd);
+#endif
+
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
int cflags);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void page_init(void);
void tb_htable_init(void);
+void tb_reset_jump(TranslationBlock *tb, int n);
+TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
+ tb_page_addr_t phys_page2);
+bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc);
+int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
+ uintptr_t searched_pc, bool reset_icount);

/* Return the current PC from CPU, which may be cached in TB. */
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
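A standalone sketch (not part of the patch) of the tagged-pointer trick that TB_FOR_EACH_TAGGED above decodes: a TB may span two guest pages, so each page-list link stores the pointer with bit 0 recording which of the TB's two page slots the link came from:

#include <stdint.h>

/* Pack a TranslationBlock pointer and a page-slot index (0 or 1)
 * into one word; pointers are at least 2-byte aligned so bit 0
 * is free to carry the tag. */
static inline uintptr_t tag_tb(void *tb, unsigned n)
{
    return (uintptr_t)tb | (n & 1);
}

/* Recover the pointer and the slot index from a tagged link. */
static inline void *untag_tb(uintptr_t link, unsigned *n)
{
    *n = link & 1;
    return (void *)(link & ~(uintptr_t)1);
}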
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
94
new file mode 100644
95
index XXXXXXX..XXXXXXX
96
--- /dev/null
97
+++ b/accel/tcg/tb-maint.c
98
@@ -XXX,XX +XXX,XX @@
99
+/*
100
+ * Translation Block Maintenance
101
+ *
102
+ * Copyright (c) 2003 Fabrice Bellard
103
+ *
104
+ * This library is free software; you can redistribute it and/or
105
+ * modify it under the terms of the GNU Lesser General Public
106
+ * License as published by the Free Software Foundation; either
107
+ * version 2.1 of the License, or (at your option) any later version.
108
+ *
109
+ * This library is distributed in the hope that it will be useful,
110
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
111
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
112
+ * Lesser General Public License for more details.
113
+ *
114
+ * You should have received a copy of the GNU Lesser General Public
115
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
116
+ */
117
+
118
+#include "qemu/osdep.h"
119
+#include "exec/cputlb.h"
120
+#include "exec/log.h"
121
+#include "exec/translate-all.h"
122
+#include "sysemu/tcg.h"
123
+#include "tcg/tcg.h"
124
+#include "tb-hash.h"
125
+#include "tb-context.h"
126
+#include "internal.h"
127
+
128
+/* FIXME: tb_invalidate_phys_range is declared in different places. */
129
+#ifdef CONFIG_USER_ONLY
130
+#include "exec/exec-all.h"
131
+#else
132
+#include "exec/ram_addr.h"
133
+#endif
134
+
135
+static bool tb_cmp(const void *ap, const void *bp)
136
+{
137
+ const TranslationBlock *a = ap;
138
+ const TranslationBlock *b = bp;
139
+
140
+ return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
141
+ a->cs_base == b->cs_base &&
142
+ a->flags == b->flags &&
143
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
144
+ a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
145
+ a->page_addr[0] == b->page_addr[0] &&
146
+ a->page_addr[1] == b->page_addr[1]);
147
+}
148
+
149
+void tb_htable_init(void)
150
+{
151
+ unsigned int mode = QHT_MODE_AUTO_RESIZE;
152
+
153
+ qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
154
+}
155
+
156
+/* Set to NULL all the 'first_tb' fields in all PageDescs. */
157
+static void page_flush_tb_1(int level, void **lp)
158
+{
159
+ int i;
160
+
161
+ if (*lp == NULL) {
162
+ return;
163
+ }
164
+ if (level == 0) {
165
+ PageDesc *pd = *lp;
166
+
167
+ for (i = 0; i < V_L2_SIZE; ++i) {
168
+ page_lock(&pd[i]);
169
+ pd[i].first_tb = (uintptr_t)NULL;
170
+ page_unlock(&pd[i]);
28
+ }
171
+ }
29
+ insn = deposit32(insn, 23, 1, u);
172
+ } else {
30
+ insn = deposit32(insn, 0, 12, offset);
173
+ void **pp = *lp;
31
+ *code_ptr = insn;
174
+
175
+ for (i = 0; i < V_L2_SIZE; ++i) {
176
+ page_flush_tb_1(level - 1, pp + i);
177
+ }
178
+ }
179
+}
180
+
181
+static void page_flush_tb(void)
182
+{
183
+ int i, l1_sz = v_l1_size;
184
+
185
+ for (i = 0; i < l1_sz; i++) {
186
+ page_flush_tb_1(v_l2_levels, l1_map + i);
187
+ }
188
+}
189
+
190
+/* flush all the translation blocks */
191
+static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
192
+{
193
+ bool did_flush = false;
194
+
195
+ mmap_lock();
196
+ /* If it is already been done on request of another CPU, just retry. */
197
+ if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
198
+ goto done;
199
+ }
200
+ did_flush = true;
201
+
202
+ CPU_FOREACH(cpu) {
203
+ tcg_flush_jmp_cache(cpu);
204
+ }
205
+
206
+ qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
207
+ page_flush_tb();
208
+
209
+ tcg_region_reset_all();
210
+ /* XXX: flush processor icache at this point if cache flush is expensive */
211
+ qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
212
+
213
+done:
214
+ mmap_unlock();
215
+ if (did_flush) {
216
+ qemu_plugin_flush_cb();
217
+ }
218
+}
219
+
220
+void tb_flush(CPUState *cpu)
221
+{
222
+ if (tcg_enabled()) {
223
+ unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
224
+
225
+ if (cpu_in_exclusive_context(cpu)) {
226
+ do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
227
+ } else {
228
+ async_safe_run_on_cpu(cpu, do_tb_flush,
229
+ RUN_ON_CPU_HOST_INT(tb_flush_count));
230
+ }
231
+ }
232
+}
233
+
234
+/*
235
+ * user-mode: call with mmap_lock held
236
+ * !user-mode: call with @pd->lock held
237
+ */
238
+static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
239
+{
240
+ TranslationBlock *tb1;
241
+ uintptr_t *pprev;
242
+ unsigned int n1;
243
+
244
+ assert_page_locked(pd);
245
+ pprev = &pd->first_tb;
246
+ PAGE_FOR_EACH_TB(pd, tb1, n1) {
247
+ if (tb1 == tb) {
248
+ *pprev = tb1->page_next[n1];
249
+ return;
250
+ }
251
+ pprev = &tb1->page_next[n1];
252
+ }
253
+ g_assert_not_reached();
254
+}
255
+
256
+/* remove @orig from its @n_orig-th jump list */
257
+static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
258
+{
259
+ uintptr_t ptr, ptr_locked;
260
+ TranslationBlock *dest;
261
+ TranslationBlock *tb;
262
+ uintptr_t *pprev;
263
+ int n;
264
+
265
+ /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
266
+ ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
267
+ dest = (TranslationBlock *)(ptr & ~1);
268
+ if (dest == NULL) {
269
+ return;
270
+ }
271
+
272
+ qemu_spin_lock(&dest->jmp_lock);
273
+ /*
274
+ * While acquiring the lock, the jump might have been removed if the
275
+ * destination TB was invalidated; check again.
276
+ */
277
+ ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
278
+ if (ptr_locked != ptr) {
279
+ qemu_spin_unlock(&dest->jmp_lock);
280
+ /*
281
+ * The only possibility is that the jump was unlinked via
282
+ * tb_jump_unlink(dest). Seeing here another destination would be a bug,
283
+ * because we set the LSB above.
284
+ */
285
+ g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
286
+ return;
287
+ }
288
+ /*
289
+ * We first acquired the lock, and since the destination pointer matches,
290
+ * we know for sure that @orig is in the jmp list.
291
+ */
292
+ pprev = &dest->jmp_list_head;
293
+ TB_FOR_EACH_JMP(dest, tb, n) {
294
+ if (tb == orig && n == n_orig) {
295
+ *pprev = tb->jmp_list_next[n];
296
+ /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
297
+ qemu_spin_unlock(&dest->jmp_lock);
298
+ return;
299
+ }
300
+ pprev = &tb->jmp_list_next[n];
301
+ }
302
+ g_assert_not_reached();
303
+}
304
+
305
+/*
306
+ * Reset the jump entry 'n' of a TB so that it is not chained to another TB.
307
+ */
308
+void tb_reset_jump(TranslationBlock *tb, int n)
309
+{
310
+ uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
311
+ tb_set_jmp_target(tb, n, addr);
312
+}
313
+
314
+/* remove any jumps to the TB */
315
+static inline void tb_jmp_unlink(TranslationBlock *dest)
316
+{
317
+ TranslationBlock *tb;
318
+ int n;
319
+
320
+ qemu_spin_lock(&dest->jmp_lock);
321
+
322
+ TB_FOR_EACH_JMP(dest, tb, n) {
323
+ tb_reset_jump(tb, n);
324
+ qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
325
+ /* No need to clear the list entry; setting the dest ptr is enough */
326
+ }
327
+ dest->jmp_list_head = (uintptr_t)NULL;
328
+
329
+ qemu_spin_unlock(&dest->jmp_lock);
330
+}
331
+
332
+static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
333
+{
334
+ CPUState *cpu;
335
+
336
+ if (TARGET_TB_PCREL) {
337
+ /* A TB may be at any virtual address */
338
+ CPU_FOREACH(cpu) {
339
+ tcg_flush_jmp_cache(cpu);
340
+ }
341
+ } else {
342
+ uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
343
+
344
+ CPU_FOREACH(cpu) {
345
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
346
+
347
+ if (qatomic_read(&jc->array[h].tb) == tb) {
348
+ qatomic_set(&jc->array[h].tb, NULL);
349
+ }
350
+ }
351
+ }
352
+}
353
+
354
+/*
355
+ * In user-mode, call with mmap_lock held.
356
+ * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
357
+ * locks held.
358
+ */
359
+static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
360
+{
361
+ PageDesc *p;
362
+ uint32_t h;
363
+ tb_page_addr_t phys_pc;
364
+ uint32_t orig_cflags = tb_cflags(tb);
365
+
366
+ assert_memory_lock();
367
+
368
+ /* make sure no further incoming jumps will be chained to this TB */
369
+ qemu_spin_lock(&tb->jmp_lock);
370
+ qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
371
+ qemu_spin_unlock(&tb->jmp_lock);
372
+
373
+ /* remove the TB from the hash list */
374
+ phys_pc = tb->page_addr[0];
375
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
376
+ tb->flags, orig_cflags, tb->trace_vcpu_dstate);
377
+ if (!qht_remove(&tb_ctx.htable, tb, h)) {
378
+ return;
379
+ }
380
+
381
+ /* remove the TB from the page list */
382
+ if (rm_from_page_list) {
383
+ p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
384
+ tb_page_remove(p, tb);
385
+ if (tb->page_addr[1] != -1) {
386
+ p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
387
+ tb_page_remove(p, tb);
388
+ }
389
+ }
390
+
391
+ /* remove the TB from the hash list */
392
+ tb_jmp_cache_inval_tb(tb);
393
+
394
+ /* suppress this TB from the two jump lists */
395
+ tb_remove_from_jmp_list(tb, 0);
396
+ tb_remove_from_jmp_list(tb, 1);
397
+
398
+ /* suppress any remaining jumps to this TB */
399
+ tb_jmp_unlink(tb);
400
+
401
+ qatomic_set(&tb_ctx.tb_phys_invalidate_count,
402
+ tb_ctx.tb_phys_invalidate_count + 1);
403
+}
404
+
405
+static void tb_phys_invalidate__locked(TranslationBlock *tb)
406
+{
407
+ qemu_thread_jit_write();
408
+ do_tb_phys_invalidate(tb, true);
409
+ qemu_thread_jit_execute();
410
+}
411
+
412
+static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
413
+ PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
414
+{
415
+ PageDesc *p1, *p2;
416
+ tb_page_addr_t page1;
417
+ tb_page_addr_t page2;
418
+
419
+ assert_memory_lock();
420
+ g_assert(phys1 != -1);
421
+
422
+ page1 = phys1 >> TARGET_PAGE_BITS;
423
+ page2 = phys2 >> TARGET_PAGE_BITS;
424
+
425
+ p1 = page_find_alloc(page1, alloc);
426
+ if (ret_p1) {
427
+ *ret_p1 = p1;
428
+ }
429
+ if (likely(phys2 == -1)) {
430
+ page_lock(p1);
431
+ return;
432
+ } else if (page1 == page2) {
433
+ page_lock(p1);
434
+ if (ret_p2) {
435
+ *ret_p2 = p1;
436
+ }
437
+ return;
438
+ }
439
+ p2 = page_find_alloc(page2, alloc);
440
+ if (ret_p2) {
441
+ *ret_p2 = p2;
442
+ }
443
+ if (page1 < page2) {
444
+ page_lock(p1);
445
+ page_lock(p2);
446
+ } else {
447
+ page_lock(p2);
448
+ page_lock(p1);
449
+ }
450
+}
451
+
452
+#ifdef CONFIG_USER_ONLY
453
+static inline void page_lock_tb(const TranslationBlock *tb) { }
454
+static inline void page_unlock_tb(const TranslationBlock *tb) { }
455
+#else
456
+/* lock the page(s) of a TB in the correct acquisition order */
457
+static void page_lock_tb(const TranslationBlock *tb)
458
+{
459
+ page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
460
+}
461
+
462
+static void page_unlock_tb(const TranslationBlock *tb)
463
+{
464
+ PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
465
+
466
+ page_unlock(p1);
467
+ if (unlikely(tb->page_addr[1] != -1)) {
468
+ PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
469
+
470
+ if (p2 != p1) {
471
+ page_unlock(p2);
472
+ }
473
+ }
474
+}
475
+#endif
476
+
477
+/*
478
+ * Invalidate one TB.
479
+ * Called with mmap_lock held in user-mode.
480
+ */
481
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
482
+{
483
+ if (page_addr == -1 && tb->page_addr[0] != -1) {
484
+ page_lock_tb(tb);
485
+ do_tb_phys_invalidate(tb, true);
486
+ page_unlock_tb(tb);
487
+ } else {
488
+ do_tb_phys_invalidate(tb, false);
489
+ }
490
+}
491
+
492
+/*
493
+ * Add the tb in the target page and protect it if necessary.
494
+ * Called with mmap_lock held for user-mode emulation.
495
+ * Called with @p->lock held in !user-mode.
496
+ */
497
+static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
498
+ unsigned int n, tb_page_addr_t page_addr)
499
+{
500
+#ifndef CONFIG_USER_ONLY
501
+ bool page_already_protected;
502
+#endif
503
+
504
+ assert_page_locked(p);
505
+
506
+ tb->page_addr[n] = page_addr;
507
+ tb->page_next[n] = p->first_tb;
508
+#ifndef CONFIG_USER_ONLY
509
+ page_already_protected = p->first_tb != (uintptr_t)NULL;
510
+#endif
511
+ p->first_tb = (uintptr_t)tb | n;
512
+
513
+#if defined(CONFIG_USER_ONLY)
514
+ /* translator_loop() must have made all TB pages non-writable */
515
+ assert(!(p->flags & PAGE_WRITE));
516
+#else
517
+ /*
518
+ * If some code is already present, then the pages are already
519
+ * protected. So we handle the case where only the first TB is
520
+ * allocated in a physical page.
521
+ */
522
+ if (!page_already_protected) {
523
+ tlb_protect_code(page_addr);
524
+ }
525
+#endif
526
+}
527
+
528
+/*
529
+ * Add a new TB and link it to the physical page tables. phys_page2 is
530
+ * (-1) to indicate that only one page contains the TB.
531
+ *
532
+ * Called with mmap_lock held for user-mode emulation.
533
+ *
534
+ * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
535
+ * Note that in !user-mode, another thread might have already added a TB
536
+ * for the same block of guest code that @tb corresponds to. In that case,
537
+ * the caller should discard the original @tb, and use instead the returned TB.
538
+ */
539
+TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
540
+ tb_page_addr_t phys_page2)
541
+{
542
+ PageDesc *p;
543
+ PageDesc *p2 = NULL;
544
+ void *existing_tb = NULL;
545
+ uint32_t h;
546
+
547
+ assert_memory_lock();
548
+ tcg_debug_assert(!(tb->cflags & CF_INVALID));
549
+
550
+ /*
551
+ * Add the TB to the page list, acquiring first the pages's locks.
552
+ * We keep the locks held until after inserting the TB in the hash table,
553
+ * so that if the insertion fails we know for sure that the TBs are still
554
+ * in the page descriptors.
555
+ * Note that inserting into the hash table first isn't an option, since
556
+ * we can only insert TBs that are fully initialized.
557
+ */
558
+ page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
559
+ tb_page_add(p, tb, 0, phys_pc);
560
+ if (p2) {
561
+ tb_page_add(p2, tb, 1, phys_page2);
562
+ } else {
563
+ tb->page_addr[1] = -1;
564
+ }
565
+
566
+ /* add in the hash table */
567
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
568
+ tb->flags, tb->cflags, tb->trace_vcpu_dstate);
569
+ qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
570
+
571
+ /* remove TB from the page(s) if we couldn't insert it */
572
+ if (unlikely(existing_tb)) {
573
+ tb_page_remove(p, tb);
574
+ if (p2) {
575
+ tb_page_remove(p2, tb);
576
+ }
577
+ tb = existing_tb;
578
+ }
579
+
580
+ if (p2 && p2 != p) {
581
+ page_unlock(p2);
582
+ }
583
+ page_unlock(p);
584
+ return tb;
585
+}
586
+
587
+/*
588
+ * @p must be non-NULL.
589
+ * user-mode: call with mmap_lock held.
590
+ * !user-mode: call with all @pages locked.
591
+ */
592
+static void
593
+tb_invalidate_phys_page_range__locked(struct page_collection *pages,
594
+ PageDesc *p, tb_page_addr_t start,
595
+ tb_page_addr_t end,
596
+ uintptr_t retaddr)
597
+{
598
+ TranslationBlock *tb;
599
+ tb_page_addr_t tb_start, tb_end;
600
+ int n;
601
+#ifdef TARGET_HAS_PRECISE_SMC
602
+ CPUState *cpu = current_cpu;
603
+ CPUArchState *env = NULL;
604
+ bool current_tb_not_found = retaddr != 0;
605
+ bool current_tb_modified = false;
606
+ TranslationBlock *current_tb = NULL;
607
+ target_ulong current_pc = 0;
608
+ target_ulong current_cs_base = 0;
609
+ uint32_t current_flags = 0;
610
+#endif /* TARGET_HAS_PRECISE_SMC */
611
+
612
+ assert_page_locked(p);
613
+
614
+#if defined(TARGET_HAS_PRECISE_SMC)
615
+ if (cpu != NULL) {
616
+ env = cpu->env_ptr;
617
+ }
618
+#endif
619
+
620
+ /*
621
+ * We remove all the TBs in the range [start, end[.
622
+ * XXX: see if in some cases it could be faster to invalidate all the code
623
+ */
624
+ PAGE_FOR_EACH_TB(p, tb, n) {
625
+ assert_page_locked(p);
626
+ /* NOTE: this is subtle as a TB may span two physical pages */
627
+ if (n == 0) {
628
+ /* NOTE: tb_end may be after the end of the page, but
629
+ it is not a problem */
630
+ tb_start = tb->page_addr[0];
631
+ tb_end = tb_start + tb->size;
632
+ } else {
633
+ tb_start = tb->page_addr[1];
634
+ tb_end = tb_start + ((tb->page_addr[0] + tb->size)
635
+ & ~TARGET_PAGE_MASK);
636
+ }
637
+ if (!(tb_end <= start || tb_start >= end)) {
638
+#ifdef TARGET_HAS_PRECISE_SMC
639
+ if (current_tb_not_found) {
640
+ current_tb_not_found = false;
641
+ /* now we have a real cpu fault */
642
+ current_tb = tcg_tb_lookup(retaddr);
643
+ }
644
+ if (current_tb == tb &&
645
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
646
+ /*
647
+ * If we are modifying the current TB, we must stop
648
+ * its execution. We could be more precise by checking
649
+ * that the modification is after the current PC, but it
650
+ * would require a specialized function to partially
651
+ * restore the CPU state.
652
+ */
653
+ current_tb_modified = true;
654
+ cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
655
+ cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
656
+ &current_flags);
657
+ }
658
+#endif /* TARGET_HAS_PRECISE_SMC */
659
+ tb_phys_invalidate__locked(tb);
660
+ }
661
+ }
662
+#if !defined(CONFIG_USER_ONLY)
663
+ /* if no code remaining, no need to continue to use slow writes */
664
+ if (!p->first_tb) {
665
+ tlb_unprotect_code(start);
666
+ }
667
+#endif
668
+#ifdef TARGET_HAS_PRECISE_SMC
669
+ if (current_tb_modified) {
670
+ page_collection_unlock(pages);
671
+ /* Force execution of one insn next time. */
672
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
673
+ mmap_unlock();
674
+ cpu_loop_exit_noexc(cpu);
675
+ }
676
+#endif
677
+}
678
+
679
+/*
680
+ * Invalidate all TBs which intersect with the target physical address range
681
+ * [start;end[. NOTE: start and end must refer to the *same* physical page.
682
+ * 'is_cpu_write_access' should be true if called from a real cpu write
683
+ * access: the virtual CPU will exit the current TB if code is modified inside
684
+ * this TB.
685
+ *
686
+ * Called with mmap_lock held for user-mode emulation
687
+ */
688
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
689
+{
690
+ struct page_collection *pages;
691
+ PageDesc *p;
692
+
693
+ assert_memory_lock();
694
+
695
+ p = page_find(start >> TARGET_PAGE_BITS);
696
+ if (p == NULL) {
697
+ return;
698
+ }
699
+ pages = page_collection_lock(start, end);
700
+ tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
701
+ page_collection_unlock(pages);
702
+}
703
+
704
+/*
705
+ * Invalidate all TBs which intersect with the target physical address range
706
+ * [start;end[. NOTE: start and end may refer to *different* physical pages.
707
+ * 'is_cpu_write_access' should be true if called from a real cpu write
708
+ * access: the virtual CPU will exit the current TB if code is modified inside
709
+ * this TB.
710
+ *
711
+ * Called with mmap_lock held for user-mode emulation.
712
+ */
713
+#ifdef CONFIG_SOFTMMU
714
+void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
715
+#else
716
+void tb_invalidate_phys_range(target_ulong start, target_ulong end)
717
+#endif
718
+{
719
+ struct page_collection *pages;
720
+ tb_page_addr_t next;
721
+
722
+ assert_memory_lock();
723
+
724
+ pages = page_collection_lock(start, end);
725
+ for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
726
+ start < end;
727
+ start = next, next += TARGET_PAGE_SIZE) {
728
+ PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
729
+ tb_page_addr_t bound = MIN(next, end);
730
+
731
+ if (pd == NULL) {
732
+ continue;
733
+ }
734
+ tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
735
+ }
736
+ page_collection_unlock(pages);
737
+}
738
+
739
+#ifdef CONFIG_SOFTMMU
740
+/*
741
+ * len must be <= 8 and start must be a multiple of len.
742
+ * Called via softmmu_template.h when code areas are written to with
743
+ * iothread mutex not held.
744
+ *
745
+ * Call with all @pages in the range [@start, @start + len[ locked.
746
+ */
747
+void tb_invalidate_phys_page_fast(struct page_collection *pages,
748
+ tb_page_addr_t start, int len,
749
+ uintptr_t retaddr)
750
+{
751
+ PageDesc *p;
752
+
753
+ assert_memory_lock();
754
+
755
+ p = page_find(start >> TARGET_PAGE_BITS);
756
+ if (!p) {
757
+ return;
758
+ }
759
+
760
+ assert_page_locked(p);
761
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
762
+ retaddr);
763
+}
764
+#else
765
+/*
766
+ * Called with mmap_lock held. If pc is not 0 then it indicates the
767
+ * host PC of the faulting store instruction that caused this invalidate.
768
+ * Returns true if the caller needs to abort execution of the current
769
+ * TB (because it was modified by this store and the guest CPU has
770
+ * precise-SMC semantics).
771
+ */
772
+bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
773
+{
774
+ TranslationBlock *tb;
775
+ PageDesc *p;
776
+ int n;
777
+#ifdef TARGET_HAS_PRECISE_SMC
778
+ TranslationBlock *current_tb = NULL;
779
+ CPUState *cpu = current_cpu;
780
+ CPUArchState *env = NULL;
781
+ int current_tb_modified = 0;
782
+ target_ulong current_pc = 0;
783
+ target_ulong current_cs_base = 0;
784
+ uint32_t current_flags = 0;
785
+#endif
786
+
787
+ assert_memory_lock();
788
+
789
+ addr &= TARGET_PAGE_MASK;
790
+ p = page_find(addr >> TARGET_PAGE_BITS);
791
+ if (!p) {
792
+ return false;
793
+ }
794
+
795
+#ifdef TARGET_HAS_PRECISE_SMC
796
+ if (p->first_tb && pc != 0) {
797
+ current_tb = tcg_tb_lookup(pc);
798
+ }
799
+ if (cpu != NULL) {
800
+ env = cpu->env_ptr;
801
+ }
802
+#endif
803
+ assert_page_locked(p);
804
+ PAGE_FOR_EACH_TB(p, tb, n) {
805
+#ifdef TARGET_HAS_PRECISE_SMC
806
+ if (current_tb == tb &&
807
+ (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
808
+ /*
809
+ * If we are modifying the current TB, we must stop its execution.
810
+ * We could be more precise by checking that the modification is
811
+ * after the current PC, but it would require a specialized
812
+ * function to partially restore the CPU state.
813
+ */
814
+ current_tb_modified = 1;
815
+ cpu_restore_state_from_tb(cpu, current_tb, pc, true);
816
+ cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
817
+ &current_flags);
818
+ }
819
+#endif /* TARGET_HAS_PRECISE_SMC */
820
+ tb_phys_invalidate(tb, addr);
821
+ }
822
+ p->first_tb = (uintptr_t)NULL;
823
+#ifdef TARGET_HAS_PRECISE_SMC
824
+ if (current_tb_modified) {
825
+ /* Force execution of one insn next time. */
826
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
32
+ return true;
827
+ return true;
33
+ }
828
+ }
829
+#endif
830
+
34
+ return false;
831
+ return false;
35
+}
832
+}
36
+
833
+#endif
37
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
834
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
38
intptr_t value, intptr_t addend)
835
index XXXXXXX..XXXXXXX 100644
836
--- a/accel/tcg/translate-all.c
837
+++ b/accel/tcg/translate-all.c
838
@@ -XXX,XX +XXX,XX @@ struct page_collection {
839
struct page_entry *max;
840
};
841
842
-/* list iterators for lists of tagged pointers in TranslationBlock */
843
-#define TB_FOR_EACH_TAGGED(head, tb, n, field) \
844
- for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1); \
845
- tb; tb = (TranslationBlock *)tb->field[n], n = (uintptr_t)tb & 1, \
846
- tb = (TranslationBlock *)((uintptr_t)tb & ~1))
847
-
848
-#define PAGE_FOR_EACH_TB(pagedesc, tb, n) \
849
- TB_FOR_EACH_TAGGED((pagedesc)->first_tb, tb, n, page_next)
850
-
851
-#define TB_FOR_EACH_JMP(head_tb, tb, n) \
852
- TB_FOR_EACH_TAGGED((head_tb)->jmp_list_head, tb, n, jmp_list_next)
853
-
854
/*
855
* In system mode we want L1_MAP to be based on ram offsets,
856
* while in user mode we want it to be based on virtual addresses.
857
@@ -XXX,XX +XXX,XX @@ struct page_collection {
858
# define L1_MAP_ADDR_SPACE_BITS MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
859
#endif
860
861
-/* Size of the L2 (and L3, etc) page tables. */
862
-#define V_L2_BITS 10
863
-#define V_L2_SIZE (1 << V_L2_BITS)
864
-
865
/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
866
QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
867
sizeof_field(TranslationBlock, trace_vcpu_dstate)
868
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
869
/*
870
* L1 Mapping properties
871
*/
872
-static int v_l1_size;
873
-static int v_l1_shift;
874
-static int v_l2_levels;
875
+int v_l1_size;
876
+int v_l1_shift;
877
+int v_l2_levels;
878
879
-/* The bottom level has pointers to PageDesc, and is indexed by
880
- * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
881
- */
882
-#define V_L1_MIN_BITS 4
883
-#define V_L1_MAX_BITS (V_L2_BITS + 3)
884
-#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
885
-
886
-static void *l1_map[V_L1_MAX_SIZE];
887
+void *l1_map[V_L1_MAX_SIZE];
888
889
TBContext tb_ctx;
890
891
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
892
* When reset_icount is true, current TB will be interrupted and
893
* icount should be recalculated.
894
*/
895
-static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
896
- uintptr_t searched_pc, bool reset_icount)
897
+int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
898
+ uintptr_t searched_pc, bool reset_icount)
39
{
899
{
40
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
900
target_ulong data[TARGET_INSN_START_WORDS];
41
if (type == R_ARM_PC24) {
901
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
42
return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
902
@@ -XXX,XX +XXX,XX @@ PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
43
} else if (type == R_ARM_PC13) {
903
return pd + (index & (V_L2_SIZE - 1));
44
- intptr_t diff = value - (uintptr_t)(code_ptr + 2);
45
- tcg_insn_unit insn = *code_ptr;
46
- bool u;
47
-
48
- if (diff >= -0xfff && diff <= 0xfff) {
49
- u = (diff >= 0);
50
- if (!u) {
51
- diff = -diff;
52
- }
53
- } else {
54
- int rd = extract32(insn, 12, 4);
55
- int rt = rd == TCG_REG_PC ? TCG_REG_TMP : rd;
56
-
57
- if (diff < 0x1000 || diff >= 0x100000) {
58
- return false;
59
- }
60
-
61
- /* add rt, pc, #high */
62
- *code_ptr++ = ((insn & 0xf0000000) | (1 << 25) | ARITH_ADD
63
- | (TCG_REG_PC << 16) | (rt << 12)
64
- | (20 << 7) | (diff >> 12));
65
- /* ldr rd, [rt, #low] */
66
- insn = deposit32(insn, 12, 4, rt);
67
- diff &= 0xfff;
68
- u = 1;
69
- }
70
- insn = deposit32(insn, 23, 1, u);
71
- insn = deposit32(insn, 0, 12, diff);
72
- *code_ptr = insn;
73
+ return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
74
} else {
75
g_assert_not_reached();
76
}
77
- return true;
78
}
904
}
79
905
80
#define TCG_CT_CONST_ARM 0x100
906
-static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
81
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
907
- PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);
82
908
-
83
static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
909
/* In user-mode page locks aren't used; mmap_lock is enough */
910
#ifdef CONFIG_USER_ONLY
911
-
912
-#define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())
913
-
914
-static inline void page_lock(PageDesc *pd)
915
-{ }
916
-
917
-static inline void page_unlock(PageDesc *pd)
918
-{ }
919
-
920
-static inline void page_lock_tb(const TranslationBlock *tb)
921
-{ }
922
-
923
-static inline void page_unlock_tb(const TranslationBlock *tb)
924
-{ }
925
-
926
struct page_collection *
927
page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
84
{
928
{
85
- /* The 12-bit range on the ldr insn is sometimes a bit too small.
929
@@ -XXX,XX +XXX,XX @@ static void page_unlock__debug(const PageDesc *pd)
86
- In order to get around that we require two insns, one of which
930
g_assert(removed);
87
- will usually be a nop, but may be replaced in patch_reloc. */
88
new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
89
tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
90
- tcg_out_nop(s);
91
}
931
}
92
932
93
static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
933
-static void
94
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
934
-do_assert_page_locked(const PageDesc *pd, const char *file, int line)
95
tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
935
+void do_assert_page_locked(const PageDesc *pd, const char *file, int line)
96
tcg_out_blx(s, COND_AL, TCG_REG_TMP);
936
{
97
} else {
937
if (unlikely(!page_is_locked(pd))) {
98
- /* ??? Know that movi_pool emits exactly 2 insns. */
938
error_report("assert_page_lock: PageDesc %p not locked @ %s:%d",
99
- tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
939
@@ -XXX,XX +XXX,XX @@ do_assert_page_locked(const PageDesc *pd, const char *file, int line)
100
+ /* ??? Know that movi_pool emits exactly 1 insn. */
101
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
102
tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
103
}
940
}
104
}
941
}
942
943
-#define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
944
-
945
void assert_no_pages_locked(void)
946
{
947
ht_pages_locked_debug_init();
948
@@ -XXX,XX +XXX,XX @@ void assert_no_pages_locked(void)
949
950
#else /* !CONFIG_DEBUG_TCG */
951
952
-#define assert_page_locked(pd)
953
-
954
-static inline void page_lock__debug(const PageDesc *pd)
955
-{
956
-}
957
-
958
-static inline void page_unlock__debug(const PageDesc *pd)
959
-{
960
-}
961
+static inline void page_lock__debug(const PageDesc *pd) { }
962
+static inline void page_unlock__debug(const PageDesc *pd) { }
963
964
#endif /* CONFIG_DEBUG_TCG */
965
966
-static inline void page_lock(PageDesc *pd)
967
+void page_lock(PageDesc *pd)
968
{
969
page_lock__debug(pd);
970
qemu_spin_lock(&pd->lock);
971
}
972
973
-static inline void page_unlock(PageDesc *pd)
974
+void page_unlock(PageDesc *pd)
975
{
976
qemu_spin_unlock(&pd->lock);
977
page_unlock__debug(pd);
978
}
979
980
-/* lock the page(s) of a TB in the correct acquisition order */
981
-static inline void page_lock_tb(const TranslationBlock *tb)
982
-{
983
- page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
984
-}
985
-
986
-static inline void page_unlock_tb(const TranslationBlock *tb)
987
-{
988
- PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
989
-
990
- page_unlock(p1);
991
- if (unlikely(tb->page_addr[1] != -1)) {
992
- PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
993
-
994
- if (p2 != p1) {
995
- page_unlock(p2);
996
- }
997
- }
998
-}
999
-
1000
static inline struct page_entry *
1001
page_entry_new(PageDesc *pd, tb_page_addr_t index)
1002
{
1003
@@ -XXX,XX +XXX,XX @@ void page_collection_unlock(struct page_collection *set)
1004
1005
#endif /* !CONFIG_USER_ONLY */
1006
1007
-static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
1008
- PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
1009
-{
1010
- PageDesc *p1, *p2;
1011
- tb_page_addr_t page1;
1012
- tb_page_addr_t page2;
1013
-
1014
- assert_memory_lock();
1015
- g_assert(phys1 != -1);
1016
-
1017
- page1 = phys1 >> TARGET_PAGE_BITS;
1018
- page2 = phys2 >> TARGET_PAGE_BITS;
1019
-
1020
- p1 = page_find_alloc(page1, alloc);
1021
- if (ret_p1) {
1022
- *ret_p1 = p1;
1023
- }
1024
- if (likely(phys2 == -1)) {
1025
- page_lock(p1);
1026
- return;
1027
- } else if (page1 == page2) {
1028
- page_lock(p1);
1029
- if (ret_p2) {
1030
- *ret_p2 = p1;
1031
- }
1032
- return;
1033
- }
1034
- p2 = page_find_alloc(page2, alloc);
1035
- if (ret_p2) {
1036
- *ret_p2 = p2;
1037
- }
1038
- if (page1 < page2) {
1039
- page_lock(p1);
1040
- page_lock(p2);
1041
- } else {
1042
- page_lock(p2);
1043
- page_lock(p1);
1044
- }
1045
-}
1046
-
1047
-static bool tb_cmp(const void *ap, const void *bp)
1048
-{
1049
- const TranslationBlock *a = ap;
1050
- const TranslationBlock *b = bp;
1051
-
1052
- return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
1053
- a->cs_base == b->cs_base &&
1054
- a->flags == b->flags &&
1055
- (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
1056
- a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
1057
- a->page_addr[0] == b->page_addr[0] &&
1058
- a->page_addr[1] == b->page_addr[1]);
1059
-}
1060
-
1061
-void tb_htable_init(void)
1062
-{
1063
- unsigned int mode = QHT_MODE_AUTO_RESIZE;
1064
-
1065
- qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
1066
-}
1067
-
1068
-/* Set to NULL all the 'first_tb' fields in all PageDescs. */
1069
-static void page_flush_tb_1(int level, void **lp)
1070
-{
1071
- int i;
1072
-
1073
- if (*lp == NULL) {
1074
- return;
1075
- }
1076
- if (level == 0) {
1077
- PageDesc *pd = *lp;
1078
-
1079
- for (i = 0; i < V_L2_SIZE; ++i) {
1080
- page_lock(&pd[i]);
1081
- pd[i].first_tb = (uintptr_t)NULL;
1082
- page_unlock(&pd[i]);
1083
- }
1084
- } else {
1085
- void **pp = *lp;
1086
-
1087
- for (i = 0; i < V_L2_SIZE; ++i) {
1088
- page_flush_tb_1(level - 1, pp + i);
1089
- }
1090
- }
1091
-}
1092
-
1093
-static void page_flush_tb(void)
1094
-{
1095
- int i, l1_sz = v_l1_size;
1096
-
1097
- for (i = 0; i < l1_sz; i++) {
1098
- page_flush_tb_1(v_l2_levels, l1_map + i);
1099
- }
1100
-}
1101
-
1102
-/* flush all the translation blocks */
1103
-static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
1104
-{
1105
- bool did_flush = false;
1106
-
1107
- mmap_lock();
1108
- /* If it is already been done on request of another CPU,
1109
- * just retry.
1110
- */
1111
- if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
1112
- goto done;
1113
- }
1114
- did_flush = true;
1115
-
1116
- CPU_FOREACH(cpu) {
1117
- tcg_flush_jmp_cache(cpu);
1118
- }
1119
-
1120
- qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
1121
- page_flush_tb();
1122
-
1123
- tcg_region_reset_all();
1124
- /* XXX: flush processor icache at this point if cache flush is
1125
- expensive */
1126
- qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
1127
-
1128
-done:
1129
- mmap_unlock();
1130
- if (did_flush) {
1131
- qemu_plugin_flush_cb();
1132
- }
1133
-}
1134
-
1135
-void tb_flush(CPUState *cpu)
1136
-{
1137
- if (tcg_enabled()) {
1138
- unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
1139
-
1140
- if (cpu_in_exclusive_context(cpu)) {
1141
- do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
1142
- } else {
1143
- async_safe_run_on_cpu(cpu, do_tb_flush,
1144
- RUN_ON_CPU_HOST_INT(tb_flush_count));
1145
- }
1146
- }
1147
-}
1148
-
1149
-/*
1150
- * user-mode: call with mmap_lock held
1151
- * !user-mode: call with @pd->lock held
1152
- */
1153
-static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
1154
-{
1155
- TranslationBlock *tb1;
1156
- uintptr_t *pprev;
1157
- unsigned int n1;
1158
-
1159
- assert_page_locked(pd);
1160
- pprev = &pd->first_tb;
1161
- PAGE_FOR_EACH_TB(pd, tb1, n1) {
1162
- if (tb1 == tb) {
1163
- *pprev = tb1->page_next[n1];
1164
- return;
1165
- }
1166
- pprev = &tb1->page_next[n1];
1167
- }
1168
- g_assert_not_reached();
1169
-}
1170
-
1171
-/* remove @orig from its @n_orig-th jump list */
1172
-static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
1173
-{
1174
- uintptr_t ptr, ptr_locked;
1175
- TranslationBlock *dest;
1176
- TranslationBlock *tb;
1177
- uintptr_t *pprev;
1178
- int n;
1179
-
1180
- /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
1181
- ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
1182
- dest = (TranslationBlock *)(ptr & ~1);
1183
- if (dest == NULL) {
1184
- return;
1185
- }
1186
-
1187
- qemu_spin_lock(&dest->jmp_lock);
1188
- /*
1189
- * While acquiring the lock, the jump might have been removed if the
1190
- * destination TB was invalidated; check again.
1191
- */
1192
- ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
1193
- if (ptr_locked != ptr) {
1194
- qemu_spin_unlock(&dest->jmp_lock);
1195
- /*
1196
- * The only possibility is that the jump was unlinked via
1197
- * tb_jump_unlink(dest). Seeing here another destination would be a bug,
1198
- * because we set the LSB above.
1199
- */
1200
- g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
1201
- return;
1202
- }
1203
- /*
1204
- * We first acquired the lock, and since the destination pointer matches,
1205
- * we know for sure that @orig is in the jmp list.
1206
- */
1207
- pprev = &dest->jmp_list_head;
1208
- TB_FOR_EACH_JMP(dest, tb, n) {
1209
- if (tb == orig && n == n_orig) {
1210
- *pprev = tb->jmp_list_next[n];
1211
- /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
1212
- qemu_spin_unlock(&dest->jmp_lock);
1213
- return;
1214
- }
1215
- pprev = &tb->jmp_list_next[n];
1216
- }
1217
- g_assert_not_reached();
1218
-}
1219
-
1220
-/* reset the jump entry 'n' of a TB so that it is not chained to
1221
- another TB */
1222
-static inline void tb_reset_jump(TranslationBlock *tb, int n)
1223
-{
1224
- uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
1225
- tb_set_jmp_target(tb, n, addr);
1226
-}
1227
-
1228
-/* remove any jumps to the TB */
1229
-static inline void tb_jmp_unlink(TranslationBlock *dest)
1230
-{
1231
- TranslationBlock *tb;
1232
- int n;
1233
-
1234
- qemu_spin_lock(&dest->jmp_lock);
1235
-
1236
- TB_FOR_EACH_JMP(dest, tb, n) {
1237
- tb_reset_jump(tb, n);
1238
- qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
1239
- /* No need to clear the list entry; setting the dest ptr is enough */
1240
- }
1241
- dest->jmp_list_head = (uintptr_t)NULL;
1242
-
1243
- qemu_spin_unlock(&dest->jmp_lock);
1244
-}
1245
-
1246
-static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
1247
-{
1248
- CPUState *cpu;
1249
-
1250
- if (TARGET_TB_PCREL) {
1251
- /* A TB may be at any virtual address */
1252
- CPU_FOREACH(cpu) {
1253
- tcg_flush_jmp_cache(cpu);
1254
- }
1255
- } else {
1256
- uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
1257
-
1258
- CPU_FOREACH(cpu) {
1259
- CPUJumpCache *jc = cpu->tb_jmp_cache;
1260
-
1261
- if (qatomic_read(&jc->array[h].tb) == tb) {
1262
- qatomic_set(&jc->array[h].tb, NULL);
1263
- }
1264
- }
1265
- }
1266
-}
1267
-
1268
-/*
1269
- * In user-mode, call with mmap_lock held.
1270
- * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
1271
- * locks held.
1272
- */
1273
-static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
1274
-{
1275
- PageDesc *p;
1276
- uint32_t h;
1277
- tb_page_addr_t phys_pc;
1278
- uint32_t orig_cflags = tb_cflags(tb);
1279
-
1280
- assert_memory_lock();
1281
-
1282
- /* make sure no further incoming jumps will be chained to this TB */
1283
- qemu_spin_lock(&tb->jmp_lock);
1284
- qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
1285
- qemu_spin_unlock(&tb->jmp_lock);
1286
-
1287
- /* remove the TB from the hash list */
1288
- phys_pc = tb->page_addr[0];
1289
- h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
1290
- tb->flags, orig_cflags, tb->trace_vcpu_dstate);
1291
- if (!qht_remove(&tb_ctx.htable, tb, h)) {
1292
- return;
1293
- }
1294
-
1295
- /* remove the TB from the page list */
1296
- if (rm_from_page_list) {
1297
- p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
1298
- tb_page_remove(p, tb);
1299
- if (tb->page_addr[1] != -1) {
1300
- p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
1301
- tb_page_remove(p, tb);
1302
- }
1303
- }
1304
-
1305
- /* remove the TB from the hash list */
1306
- tb_jmp_cache_inval_tb(tb);
1307
-
1308
- /* suppress this TB from the two jump lists */
1309
- tb_remove_from_jmp_list(tb, 0);
1310
- tb_remove_from_jmp_list(tb, 1);
1311
-
1312
- /* suppress any remaining jumps to this TB */
1313
- tb_jmp_unlink(tb);
1314
-
1315
- qatomic_set(&tb_ctx.tb_phys_invalidate_count,
1316
- tb_ctx.tb_phys_invalidate_count + 1);
1317
-}
1318
-
1319
-static void tb_phys_invalidate__locked(TranslationBlock *tb)
1320
-{
1321
- qemu_thread_jit_write();
1322
- do_tb_phys_invalidate(tb, true);
1323
- qemu_thread_jit_execute();
1324
-}
1325
-
1326
-/* invalidate one TB
1327
- *
1328
- * Called with mmap_lock held in user-mode.
1329
- */
1330
-void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
1331
-{
1332
- if (page_addr == -1 && tb->page_addr[0] != -1) {
1333
- page_lock_tb(tb);
1334
- do_tb_phys_invalidate(tb, true);
1335
- page_unlock_tb(tb);
1336
- } else {
1337
- do_tb_phys_invalidate(tb, false);
1338
- }
1339
-}
1340
-
1341
-/* add the tb in the target page and protect it if necessary
1342
- *
1343
- * Called with mmap_lock held for user-mode emulation.
1344
- * Called with @p->lock held in !user-mode.
1345
- */
1346
-static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
1347
- unsigned int n, tb_page_addr_t page_addr)
1348
-{
1349
-#ifndef CONFIG_USER_ONLY
1350
- bool page_already_protected;
1351
-#endif
1352
-
1353
- assert_page_locked(p);
1354
-
1355
- tb->page_addr[n] = page_addr;
1356
- tb->page_next[n] = p->first_tb;
1357
-#ifndef CONFIG_USER_ONLY
1358
- page_already_protected = p->first_tb != (uintptr_t)NULL;
1359
-#endif
1360
- p->first_tb = (uintptr_t)tb | n;
1361
-
1362
-#if defined(CONFIG_USER_ONLY)
1363
- /* translator_loop() must have made all TB pages non-writable */
1364
- assert(!(p->flags & PAGE_WRITE));
1365
-#else
1366
- /* if some code is already present, then the pages are already
1367
- protected. So we handle the case where only the first TB is
1368
- allocated in a physical page */
1369
- if (!page_already_protected) {
1370
- tlb_protect_code(page_addr);
1371
- }
1372
-#endif
1373
-}
1374
-
1375
-/*
1376
- * Add a new TB and link it to the physical page tables. phys_page2 is
1377
- * (-1) to indicate that only one page contains the TB.
1378
- *
1379
- * Called with mmap_lock held for user-mode emulation.
1380
- *
1381
- * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
1382
- * Note that in !user-mode, another thread might have already added a TB
1383
- * for the same block of guest code that @tb corresponds to. In that case,
1384
- * the caller should discard the original @tb, and use instead the returned TB.
1385
- */
1386
-static TranslationBlock *
1387
-tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1388
- tb_page_addr_t phys_page2)
1389
-{
1390
- PageDesc *p;
1391
- PageDesc *p2 = NULL;
1392
- void *existing_tb = NULL;
1393
- uint32_t h;
1394
-
1395
- assert_memory_lock();
1396
- tcg_debug_assert(!(tb->cflags & CF_INVALID));
1397
-
1398
- /*
1399
- * Add the TB to the page list, acquiring first the pages's locks.
1400
- * We keep the locks held until after inserting the TB in the hash table,
1401
- * so that if the insertion fails we know for sure that the TBs are still
1402
- * in the page descriptors.
1403
- * Note that inserting into the hash table first isn't an option, since
1404
- * we can only insert TBs that are fully initialized.
1405
- */
1406
- page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
1407
- tb_page_add(p, tb, 0, phys_pc);
1408
- if (p2) {
1409
- tb_page_add(p2, tb, 1, phys_page2);
1410
- } else {
1411
- tb->page_addr[1] = -1;
1412
- }
1413
-
1414
- /* add in the hash table */
1415
- h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
1416
- tb->flags, tb->cflags, tb->trace_vcpu_dstate);
1417
- qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
1418
-
1419
- /* remove TB from the page(s) if we couldn't insert it */
1420
- if (unlikely(existing_tb)) {
1421
- tb_page_remove(p, tb);
1422
- if (p2) {
1423
- tb_page_remove(p2, tb);
1424
- }
1425
- tb = existing_tb;
1426
- }
1427
-
1428
- if (p2 && p2 != p) {
1429
- page_unlock(p2);
1430
- }
1431
- page_unlock(p);
1432
- return tb;
1433
-}
1434
-
1435
/* Called with mmap_lock held for user mode emulation. */
1436
TranslationBlock *tb_gen_code(CPUState *cpu,
1437
target_ulong pc, target_ulong cs_base,
1438
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
1439
return tb;
1440
}
1441
1442
-/*
1443
- * @p must be non-NULL.
1444
- * user-mode: call with mmap_lock held.
1445
- * !user-mode: call with all @pages locked.
1446
- */
1447
-static void
1448
-tb_invalidate_phys_page_range__locked(struct page_collection *pages,
1449
- PageDesc *p, tb_page_addr_t start,
1450
- tb_page_addr_t end,
1451
- uintptr_t retaddr)
1452
-{
1453
- TranslationBlock *tb;
1454
- tb_page_addr_t tb_start, tb_end;
1455
- int n;
1456
-#ifdef TARGET_HAS_PRECISE_SMC
1457
- CPUState *cpu = current_cpu;
1458
- CPUArchState *env = NULL;
1459
- bool current_tb_not_found = retaddr != 0;
1460
- bool current_tb_modified = false;
1461
- TranslationBlock *current_tb = NULL;
1462
- target_ulong current_pc = 0;
1463
- target_ulong current_cs_base = 0;
1464
- uint32_t current_flags = 0;
1465
-#endif /* TARGET_HAS_PRECISE_SMC */
1466
-
1467
- assert_page_locked(p);
1468
-
1469
-#if defined(TARGET_HAS_PRECISE_SMC)
1470
- if (cpu != NULL) {
1471
- env = cpu->env_ptr;
1472
- }
1473
-#endif
1474
-
1475
- /* we remove all the TBs in the range [start, end[ */
1476
- /* XXX: see if in some cases it could be faster to invalidate all
1477
- the code */
1478
- PAGE_FOR_EACH_TB(p, tb, n) {
1479
- assert_page_locked(p);
1480
- /* NOTE: this is subtle as a TB may span two physical pages */
1481
- if (n == 0) {
1482
- /* NOTE: tb_end may be after the end of the page, but
1483
- it is not a problem */
1484
- tb_start = tb->page_addr[0];
1485
- tb_end = tb_start + tb->size;
1486
- } else {
1487
- tb_start = tb->page_addr[1];
1488
- tb_end = tb_start + ((tb->page_addr[0] + tb->size)
1489
- & ~TARGET_PAGE_MASK);
1490
- }
1491
- if (!(tb_end <= start || tb_start >= end)) {
1492
-#ifdef TARGET_HAS_PRECISE_SMC
1493
- if (current_tb_not_found) {
1494
- current_tb_not_found = false;
1495
- /* now we have a real cpu fault */
1496
- current_tb = tcg_tb_lookup(retaddr);
1497
- }
1498
- if (current_tb == tb &&
1499
- (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1500
- /*
1501
- * If we are modifying the current TB, we must stop
1502
- * its execution. We could be more precise by checking
1503
- * that the modification is after the current PC, but it
1504
- * would require a specialized function to partially
1505
- * restore the CPU state.
1506
- */
1507
- current_tb_modified = true;
1508
- cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
1509
- cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1510
- &current_flags);
1511
- }
1512
-#endif /* TARGET_HAS_PRECISE_SMC */
1513
- tb_phys_invalidate__locked(tb);
1514
- }
1515
- }
1516
-#if !defined(CONFIG_USER_ONLY)
1517
- /* if no code remaining, no need to continue to use slow writes */
1518
- if (!p->first_tb) {
1519
- tlb_unprotect_code(start);
1520
- }
1521
-#endif
1522
-#ifdef TARGET_HAS_PRECISE_SMC
1523
- if (current_tb_modified) {
1524
- page_collection_unlock(pages);
1525
- /* Force execution of one insn next time. */
1526
- cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
1527
- mmap_unlock();
1528
- cpu_loop_exit_noexc(cpu);
1529
- }
1530
-#endif
1531
-}
1532
-
1533
-/*
1534
- * Invalidate all TBs which intersect with the target physical address range
1535
- * [start;end[. NOTE: start and end must refer to the *same* physical page.
1536
- * 'is_cpu_write_access' should be true if called from a real cpu write
1537
- * access: the virtual CPU will exit the current TB if code is modified inside
1538
- * this TB.
1539
- *
1540
- * Called with mmap_lock held for user-mode emulation
1541
- */
1542
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
1543
-{
1544
- struct page_collection *pages;
1545
- PageDesc *p;
1546
-
1547
- assert_memory_lock();
1548
-
1549
- p = page_find(start >> TARGET_PAGE_BITS);
1550
- if (p == NULL) {
1551
- return;
1552
- }
1553
- pages = page_collection_lock(start, end);
1554
- tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
1555
- page_collection_unlock(pages);
1556
-}
1557
-
1558
-/*
1559
- * Invalidate all TBs which intersect with the target physical address range
1560
- * [start;end[. NOTE: start and end may refer to *different* physical pages.
1561
- * 'is_cpu_write_access' should be true if called from a real cpu write
1562
- * access: the virtual CPU will exit the current TB if code is modified inside
1563
- * this TB.
1564
- *
1565
- * Called with mmap_lock held for user-mode emulation.
1566
- */
1567
-#ifdef CONFIG_SOFTMMU
1568
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
1569
-#else
1570
-void tb_invalidate_phys_range(target_ulong start, target_ulong end)
1571
-#endif
1572
-{
1573
- struct page_collection *pages;
1574
- tb_page_addr_t next;
1575
-
1576
- assert_memory_lock();
1577
-
1578
- pages = page_collection_lock(start, end);
1579
- for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1580
- start < end;
1581
- start = next, next += TARGET_PAGE_SIZE) {
1582
- PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
1583
- tb_page_addr_t bound = MIN(next, end);
1584
-
1585
- if (pd == NULL) {
1586
- continue;
1587
- }
1588
- tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
1589
- }
1590
- page_collection_unlock(pages);
1591
-}
1592
-
1593
-#ifdef CONFIG_SOFTMMU
1594
-/* len must be <= 8 and start must be a multiple of len.
1595
- * Called via softmmu_template.h when code areas are written to with
1596
- * iothread mutex not held.
1597
- *
1598
- * Call with all @pages in the range [@start, @start + len[ locked.
1599
- */
1600
-void tb_invalidate_phys_page_fast(struct page_collection *pages,
1601
- tb_page_addr_t start, int len,
1602
- uintptr_t retaddr)
1603
-{
1604
- PageDesc *p;
1605
-
1606
- assert_memory_lock();
1607
-
1608
- p = page_find(start >> TARGET_PAGE_BITS);
1609
- if (!p) {
1610
- return;
1611
- }
1612
-
1613
- assert_page_locked(p);
1614
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
1615
- retaddr);
1616
-}
1617
-#else
1618
-/* Called with mmap_lock held. If pc is not 0 then it indicates the
1619
- * host PC of the faulting store instruction that caused this invalidate.
1620
- * Returns true if the caller needs to abort execution of the current
1621
- * TB (because it was modified by this store and the guest CPU has
1622
- * precise-SMC semantics).
1623
- */
1624
-static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
1625
-{
1626
- TranslationBlock *tb;
1627
- PageDesc *p;
1628
- int n;
1629
-#ifdef TARGET_HAS_PRECISE_SMC
1630
- TranslationBlock *current_tb = NULL;
1631
- CPUState *cpu = current_cpu;
1632
- CPUArchState *env = NULL;
1633
- int current_tb_modified = 0;
1634
- target_ulong current_pc = 0;
1635
- target_ulong current_cs_base = 0;
1636
- uint32_t current_flags = 0;
1637
-#endif
1638
-
1639
- assert_memory_lock();
1640
-
1641
- addr &= TARGET_PAGE_MASK;
1642
- p = page_find(addr >> TARGET_PAGE_BITS);
1643
- if (!p) {
1644
- return false;
1645
- }
1646
-
1647
-#ifdef TARGET_HAS_PRECISE_SMC
1648
- if (p->first_tb && pc != 0) {
1649
- current_tb = tcg_tb_lookup(pc);
1650
- }
1651
- if (cpu != NULL) {
1652
- env = cpu->env_ptr;
1653
- }
1654
-#endif
1655
- assert_page_locked(p);
1656
- PAGE_FOR_EACH_TB(p, tb, n) {
1657
-#ifdef TARGET_HAS_PRECISE_SMC
1658
- if (current_tb == tb &&
1659
- (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
1660
- /* If we are modifying the current TB, we must stop
1661
- its execution. We could be more precise by checking
1662
- that the modification is after the current PC, but it
1663
- would require a specialized function to partially
1664
- restore the CPU state */
1665
-
1666
- current_tb_modified = 1;
1667
- cpu_restore_state_from_tb(cpu, current_tb, pc, true);
1668
- cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1669
- &current_flags);
1670
- }
1671
-#endif /* TARGET_HAS_PRECISE_SMC */
1672
- tb_phys_invalidate(tb, addr);
1673
- }
1674
- p->first_tb = (uintptr_t)NULL;
1675
-#ifdef TARGET_HAS_PRECISE_SMC
1676
- if (current_tb_modified) {
1677
- /* Force execution of one insn next time. */
1678
- cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
1679
- return true;
1680
- }
1681
-#endif
1682
-
1683
- return false;
1684
-}
1685
-#endif
1686
-
1687
/* user-mode: call with mmap_lock held */
1688
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
1689
{
1690
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
1691
index XXXXXXX..XXXXXXX 100644
1692
--- a/accel/tcg/meson.build
1693
+++ b/accel/tcg/meson.build
1694
@@ -XXX,XX +XXX,XX @@ tcg_ss.add(files(
1695
'tcg-all.c',
1696
'cpu-exec-common.c',
1697
'cpu-exec.c',
1698
+ 'tb-maint.c',
1699
'tcg-runtime-gvec.c',
1700
'tcg-runtime.c',
1701
'translate-all.c',
105
--
1702
--
106
2.17.1
1703
2.34.1
107
1704
108
1705
New patch
1
There are no users outside of accel/tcg; this function
2
does not need to be defined in exec-all.h.
1
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
accel/tcg/internal.h | 5 +++++
8
include/exec/exec-all.h | 8 --------
9
2 files changed, 5 insertions(+), 8 deletions(-)
10
11
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/internal.h
14
+++ b/accel/tcg/internal.h
15
@@ -XXX,XX +XXX,XX @@ void do_assert_page_locked(const PageDesc *pd, const char *file, int line);
16
void page_lock(PageDesc *pd);
17
void page_unlock(PageDesc *pd);
18
#endif
19
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
20
+void assert_no_pages_locked(void);
21
+#else
22
+static inline void assert_no_pages_locked(void) { }
23
+#endif
24
25
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
26
target_ulong cs_base, uint32_t flags,
27
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/include/exec/exec-all.h
30
+++ b/include/exec/exec-all.h
31
@@ -XXX,XX +XXX,XX @@ extern __thread uintptr_t tci_tb_ptr;
32
smaller than 4 bytes, so we don't worry about special-casing this. */
33
#define GETPC_ADJ 2
34
35
-#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
36
-void assert_no_pages_locked(void);
37
-#else
38
-static inline void assert_no_pages_locked(void)
39
-{
40
-}
41
-#endif
42
-
43
#if !defined(CONFIG_USER_ONLY)
44
45
/**
46
--
47
2.34.1
48
49
New patch
1
The results of the calls to cpu_get_tb_cpu_state,
2
current_{pc,cs_base,flags}, are not used.
3
In tb_invalidate_phys_page, use bool for current_tb_modified.
1
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
accel/tcg/tb-maint.c | 25 ++-----------------------
9
1 file changed, 2 insertions(+), 23 deletions(-)
10
11
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/tb-maint.c
14
+++ b/accel/tcg/tb-maint.c
15
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
16
int n;
17
#ifdef TARGET_HAS_PRECISE_SMC
18
CPUState *cpu = current_cpu;
19
- CPUArchState *env = NULL;
20
bool current_tb_not_found = retaddr != 0;
21
bool current_tb_modified = false;
22
TranslationBlock *current_tb = NULL;
23
- target_ulong current_pc = 0;
24
- target_ulong current_cs_base = 0;
25
- uint32_t current_flags = 0;
26
#endif /* TARGET_HAS_PRECISE_SMC */
27
28
assert_page_locked(p);
29
30
-#if defined(TARGET_HAS_PRECISE_SMC)
31
- if (cpu != NULL) {
32
- env = cpu->env_ptr;
33
- }
34
-#endif
35
-
36
/*
37
* We remove all the TBs in the range [start, end[.
38
* XXX: see if in some cases it could be faster to invalidate all the code
39
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
40
*/
41
current_tb_modified = true;
42
cpu_restore_state_from_tb(cpu, current_tb, retaddr, true);
43
- cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
44
- &current_flags);
45
}
46
#endif /* TARGET_HAS_PRECISE_SMC */
47
tb_phys_invalidate__locked(tb);
48
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
49
#ifdef TARGET_HAS_PRECISE_SMC
50
TranslationBlock *current_tb = NULL;
51
CPUState *cpu = current_cpu;
52
- CPUArchState *env = NULL;
53
- int current_tb_modified = 0;
54
- target_ulong current_pc = 0;
55
- target_ulong current_cs_base = 0;
56
- uint32_t current_flags = 0;
57
+ bool current_tb_modified = false;
58
#endif
59
60
assert_memory_lock();
61
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
62
if (p->first_tb && pc != 0) {
63
current_tb = tcg_tb_lookup(pc);
64
}
65
- if (cpu != NULL) {
66
- env = cpu->env_ptr;
67
- }
68
#endif
69
assert_page_locked(p);
70
PAGE_FOR_EACH_TB(p, tb, n) {
71
@@ -XXX,XX +XXX,XX @@ bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
72
* after the current PC, but it would require a specialized
73
* function to partially restore the CPU state.
74
*/
75
- current_tb_modified = 1;
76
+ current_tb_modified = true;
77
cpu_restore_state_from_tb(cpu, current_tb, pc, true);
78
- cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
79
- &current_flags);
80
}
81
#endif /* TARGET_HAS_PRECISE_SMC */
82
tb_phys_invalidate(tb, addr);
83
--
84
2.34.1
85
86
New patch
1
When we added the fast path, we initialized page_addr[] early.
2
These stores in and around tb_page_add() are redundant; remove them.
1
3
4
Fixes: 50627f1b7b1 ("accel/tcg: Add fast path for translator_ld*")
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
accel/tcg/tb-maint.c | 3 ---
9
1 file changed, 3 deletions(-)
10
11
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/tb-maint.c
14
+++ b/accel/tcg/tb-maint.c
15
@@ -XXX,XX +XXX,XX @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
16
17
assert_page_locked(p);
18
19
- tb->page_addr[n] = page_addr;
20
tb->page_next[n] = p->first_tb;
21
#ifndef CONFIG_USER_ONLY
22
page_already_protected = p->first_tb != (uintptr_t)NULL;
23
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
24
tb_page_add(p, tb, 0, phys_pc);
25
if (p2) {
26
tb_page_add(p2, tb, 1, phys_page2);
27
- } else {
28
- tb->page_addr[1] = -1;
29
}
30
31
/* add in the hash table */
32
--
33
2.34.1
34
35
New patch
1
1
This data structure will be replaced for user-only: add accessors.
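
As a minimal sketch of the accessor migration (the tb_page_addr1()
accessor is from the exec-all.h hunk below; the wrapper function is a
hypothetical example, not part of this patch):

    #include "exec/exec-all.h"

    /* Hypothetical caller: true if @tb crosses a page boundary. */
    static bool tb_spans_two_pages(const TranslationBlock *tb)
    {
        /* was: tb->page_addr[1] != -1 */
        return tb_page_addr1(tb) != -1;
    }

Hiding the array behind accessors is what lets the user-only backing
structure change later without touching these call sites.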
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/exec/exec-all.h | 22 ++++++++++++++++++++++
7
accel/tcg/cpu-exec.c | 9 +++++----
8
accel/tcg/tb-maint.c | 29 +++++++++++++++--------------
9
accel/tcg/translate-all.c | 16 ++++++++--------
10
accel/tcg/translator.c | 9 +++++----
11
5 files changed, 55 insertions(+), 30 deletions(-)
12
13
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/exec-all.h
16
+++ b/include/exec/exec-all.h
17
@@ -XXX,XX +XXX,XX @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
18
return qatomic_read(&tb->cflags);
19
}
20
21
+static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
22
+{
23
+ return tb->page_addr[0];
24
+}
25
+
26
+static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
27
+{
28
+ return tb->page_addr[1];
29
+}
30
+
31
+static inline void tb_set_page_addr0(TranslationBlock *tb,
32
+ tb_page_addr_t addr)
33
+{
34
+ tb->page_addr[0] = addr;
35
+}
36
+
37
+static inline void tb_set_page_addr1(TranslationBlock *tb,
38
+ tb_page_addr_t addr)
39
+{
40
+ tb->page_addr[1] = addr;
41
+}
42
+
43
/* current cflags for hashing/comparison */
44
uint32_t curr_cflags(CPUState *cpu);
45
46
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/accel/tcg/cpu-exec.c
49
+++ b/accel/tcg/cpu-exec.c
50
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
51
const struct tb_desc *desc = d;
52
53
if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
54
- tb->page_addr[0] == desc->page_addr0 &&
55
+ tb_page_addr0(tb) == desc->page_addr0 &&
56
tb->cs_base == desc->cs_base &&
57
tb->flags == desc->flags &&
58
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
59
tb_cflags(tb) == desc->cflags) {
60
/* check next page if needed */
61
- if (tb->page_addr[1] == -1) {
62
+ tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
63
+ if (tb_phys_page1 == -1) {
64
return true;
65
} else {
66
tb_page_addr_t phys_page1;
67
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
68
*/
69
virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
70
phys_page1 = get_page_addr_code(desc->env, virt_page1);
71
- if (tb->page_addr[1] == phys_page1) {
72
+ if (tb_phys_page1 == phys_page1) {
73
return true;
74
}
75
}
76
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
77
* direct jump to a TB spanning two pages because the mapping
78
* for the second page can change.
79
*/
80
- if (tb->page_addr[1] != -1) {
81
+ if (tb_page_addr1(tb) != -1) {
82
last_tb = NULL;
83
}
84
#endif
85
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/accel/tcg/tb-maint.c
88
+++ b/accel/tcg/tb-maint.c
89
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
90
a->flags == b->flags &&
91
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
92
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
93
- a->page_addr[0] == b->page_addr[0] &&
94
- a->page_addr[1] == b->page_addr[1]);
95
+ tb_page_addr0(a) == tb_page_addr0(b) &&
96
+ tb_page_addr1(a) == tb_page_addr1(b));
97
}
98
99
void tb_htable_init(void)
100
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
101
qemu_spin_unlock(&tb->jmp_lock);
102
103
/* remove the TB from the hash list */
104
- phys_pc = tb->page_addr[0];
105
+ phys_pc = tb_page_addr0(tb);
106
h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
107
tb->flags, orig_cflags, tb->trace_vcpu_dstate);
108
if (!qht_remove(&tb_ctx.htable, tb, h)) {
109
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
110
111
/* remove the TB from the page list */
112
if (rm_from_page_list) {
113
- p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
114
+ p = page_find(phys_pc >> TARGET_PAGE_BITS);
115
tb_page_remove(p, tb);
116
- if (tb->page_addr[1] != -1) {
117
- p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
118
+ phys_pc = tb_page_addr1(tb);
119
+ if (phys_pc != -1) {
120
+ p = page_find(phys_pc >> TARGET_PAGE_BITS);
121
tb_page_remove(p, tb);
122
}
123
}
124
@@ -XXX,XX +XXX,XX @@ static inline void page_unlock_tb(const TranslationBlock *tb) { }
125
/* lock the page(s) of a TB in the correct acquisition order */
126
static void page_lock_tb(const TranslationBlock *tb)
127
{
128
- page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
129
+ page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
130
}
131
132
static void page_unlock_tb(const TranslationBlock *tb)
133
{
134
- PageDesc *p1 = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
135
+ PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
136
137
page_unlock(p1);
138
- if (unlikely(tb->page_addr[1] != -1)) {
139
- PageDesc *p2 = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
140
+ if (unlikely(tb_page_addr1(tb) != -1)) {
141
+ PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
142
143
if (p2 != p1) {
144
page_unlock(p2);
145
@@ -XXX,XX +XXX,XX @@ static void page_unlock_tb(const TranslationBlock *tb)
146
*/
147
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
148
{
149
- if (page_addr == -1 && tb->page_addr[0] != -1) {
150
+ if (page_addr == -1 && tb_page_addr0(tb) != -1) {
151
page_lock_tb(tb);
152
do_tb_phys_invalidate(tb, true);
153
page_unlock_tb(tb);
154
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
155
if (n == 0) {
156
/* NOTE: tb_end may be after the end of the page, but
157
it is not a problem */
158
- tb_start = tb->page_addr[0];
159
+ tb_start = tb_page_addr0(tb);
160
tb_end = tb_start + tb->size;
161
} else {
162
- tb_start = tb->page_addr[1];
163
- tb_end = tb_start + ((tb->page_addr[0] + tb->size)
164
+ tb_start = tb_page_addr1(tb);
165
+ tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
166
& ~TARGET_PAGE_MASK);
167
}
168
if (!(tb_end <= start || tb_start >= end)) {
169
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
170
index XXXXXXX..XXXXXXX 100644
171
--- a/accel/tcg/translate-all.c
172
+++ b/accel/tcg/translate-all.c
173
@@ -XXX,XX +XXX,XX @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
174
}
175
assert_page_locked(pd);
176
PAGE_FOR_EACH_TB(pd, tb, n) {
177
- if (page_trylock_add(set, tb->page_addr[0]) ||
178
- (tb->page_addr[1] != -1 &&
179
- page_trylock_add(set, tb->page_addr[1]))) {
180
+ if (page_trylock_add(set, tb_page_addr0(tb)) ||
181
+ (tb_page_addr1(tb) != -1 &&
182
+ page_trylock_add(set, tb_page_addr1(tb)))) {
183
/* drop all locks, and reacquire in order */
184
g_tree_foreach(set->tree, page_entry_unlock, NULL);
185
goto retry;
186
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
187
tb->flags = flags;
188
tb->cflags = cflags;
189
tb->trace_vcpu_dstate = *cpu->trace_dstate;
190
- tb->page_addr[0] = phys_pc;
191
- tb->page_addr[1] = -1;
192
+ tb_set_page_addr0(tb, phys_pc);
193
+ tb_set_page_addr1(tb, -1);
194
tcg_ctx->tb_cflags = cflags;
195
tb_overflow:
196
197
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
198
* a temporary one-insn TB, and we have nothing left to do. Return early
199
* before attempting to link to other TBs or add to the lookup table.
200
*/
201
- if (tb->page_addr[0] == -1) {
202
+ if (tb_page_addr0(tb) == -1) {
203
return tb;
204
}
205
206
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
207
* No explicit memory barrier is required -- tb_link_page() makes the
208
* TB visible in a consistent state.
209
*/
210
- existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
211
+ existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
212
/* if the TB already exists, discard what we just translated */
213
if (unlikely(existing_tb != tb)) {
214
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
215
@@ -XXX,XX +XXX,XX @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
216
if (tb->size > tst->max_target_size) {
217
tst->max_target_size = tb->size;
218
}
219
- if (tb->page_addr[1] != -1) {
220
+ if (tb_page_addr1(tb) != -1) {
221
tst->cross_page++;
222
}
223
if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
224
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/accel/tcg/translator.c
227
+++ b/accel/tcg/translator.c
228
@@ -XXX,XX +XXX,XX @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
229
tb = db->tb;
230
231
/* Use slow path if first page is MMIO. */
232
- if (unlikely(tb->page_addr[0] == -1)) {
233
+ if (unlikely(tb_page_addr0(tb) == -1)) {
234
return NULL;
235
}
236
237
@@ -XXX,XX +XXX,XX @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
238
host = db->host_addr[1];
239
base = TARGET_PAGE_ALIGN(db->pc_first);
240
if (host == NULL) {
241
- tb->page_addr[1] =
242
+ tb_page_addr_t phys_page =
243
get_page_addr_code_hostp(env, base, &db->host_addr[1]);
244
+ /* We cannot handle MMIO as second page. */
245
+ assert(phys_page != -1);
246
+ tb_set_page_addr1(tb, phys_page);
247
#ifdef CONFIG_USER_ONLY
248
page_protect(end);
249
#endif
250
- /* We cannot handle MMIO as second page. */
251
- assert(tb->page_addr[1] != -1);
252
host = db->host_addr[1];
253
}
254
255
--
256
2.34.1
257
258
New patch
1
Rename to tb_invalidate_phys_page_unwind to emphasize that
2
we also detect invalidating the current TB, and also to free
3
up that name for other usage.
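
The bool result is the part worth noticing: it reports that the TB
currently being executed was among those invalidated, so the caller must
unwind. The page_unprotect caller uses it exactly this way (lines taken
from the existing code, shown here for context):

    /* true => the current TB was invalidated; abort and re-translate */
    current_tb_invalidated |= tb_invalidate_phys_page_unwind(addr, pc);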
1
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
accel/tcg/internal.h | 2 +-
9
accel/tcg/tb-maint.c | 2 +-
10
accel/tcg/translate-all.c | 5 +++--
11
3 files changed, 5 insertions(+), 4 deletions(-)
12
13
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/accel/tcg/internal.h
16
+++ b/accel/tcg/internal.h
17
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void);
18
void tb_reset_jump(TranslationBlock *tb, int n);
19
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
20
tb_page_addr_t phys_page2);
21
-bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc);
22
+bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
23
int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
24
uintptr_t searched_pc, bool reset_icount);
25
26
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/accel/tcg/tb-maint.c
29
+++ b/accel/tcg/tb-maint.c
30
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
31
* TB (because it was modified by this store and the guest CPU has
32
* precise-SMC semantics).
33
*/
34
-bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
35
+bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
36
{
37
TranslationBlock *tb;
38
PageDesc *p;
39
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/accel/tcg/translate-all.c
42
+++ b/accel/tcg/translate-all.c
43
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
44
if (!(p->flags & PAGE_WRITE) &&
45
(flags & PAGE_WRITE) &&
46
p->first_tb) {
47
- tb_invalidate_phys_page(addr, 0);
48
+ tb_invalidate_phys_page_unwind(addr, 0);
49
}
50
if (reset_target_data) {
51
g_free(p->target_data);
52
@@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc)
53
54
/* and since the content will be modified, we must invalidate
55
the corresponding translated code. */
56
- current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
57
+ current_tb_invalidated |=
58
+ tb_invalidate_phys_page_unwind(addr, pc);
59
}
60
mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
61
prot & PAGE_BITS);
62
--
63
2.34.1
64
65
1
There is no point in coding for a 2GB offset when the max TB size
1
This function is never called with a real range,
2
is already limited to 64k. If we further restrict to 32k then we
2
only for a single page. Drop the second parameter
3
can eliminate the extra ADDIS instruction.
3
and rename to tb_invalidate_phys_page.
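
The call-site change is mechanical; a degenerate one-byte range becomes a
plain page invalidate (before/after adapted from the cpu.c hunk below):

    /* before */ tb_invalidate_phys_page_range(addr, addr + 1);
    /* after */  tb_invalidate_phys_page(addr);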
4
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
tcg/ppc/tcg-target.inc.c | 28 ++++++++++------------------
8
include/exec/translate-all.h | 2 +-
8
1 file changed, 10 insertions(+), 18 deletions(-)
9
accel/tcg/tb-maint.c | 15 ++++++++-------
10
cpu.c | 4 ++--
11
3 files changed, 11 insertions(+), 10 deletions(-)
9
12
10
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
13
diff --git a/include/exec/translate-all.h b/include/exec/translate-all.h
11
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.inc.c
15
--- a/include/exec/translate-all.h
13
+++ b/tcg/ppc/tcg-target.inc.c
16
+++ b/include/exec/translate-all.h
14
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
17
@@ -XXX,XX +XXX,XX @@ void page_collection_unlock(struct page_collection *set);
15
intptr_t value, intptr_t addend)
18
void tb_invalidate_phys_page_fast(struct page_collection *pages,
19
tb_page_addr_t start, int len,
20
uintptr_t retaddr);
21
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end);
22
+void tb_invalidate_phys_page(tb_page_addr_t addr);
23
void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr);
24
25
#ifdef CONFIG_USER_ONLY
26
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/accel/tcg/tb-maint.c
29
+++ b/accel/tcg/tb-maint.c
30
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
31
}
32
33
/*
34
- * Invalidate all TBs which intersect with the target physical address range
35
- * [start;end[. NOTE: start and end must refer to the *same* physical page.
36
- * 'is_cpu_write_access' should be true if called from a real cpu write
37
- * access: the virtual CPU will exit the current TB if code is modified inside
38
- * this TB.
39
+ * Invalidate all TBs which intersect with the target physical
40
+ * address page @addr.
41
*
42
* Called with mmap_lock held for user-mode emulation
43
*/
44
-void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end)
45
+void tb_invalidate_phys_page(tb_page_addr_t addr)
16
{
46
{
17
tcg_insn_unit *target;
47
struct page_collection *pages;
18
- tcg_insn_unit old;
48
+ tb_page_addr_t start, end;
19
49
PageDesc *p;
20
value += addend;
50
21
target = (tcg_insn_unit *)value;
51
assert_memory_lock();
22
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
52
23
case R_PPC_REL24:
53
- p = page_find(start >> TARGET_PAGE_BITS);
24
return reloc_pc24(code_ptr, target);
54
+ p = page_find(addr >> TARGET_PAGE_BITS);
25
case R_PPC_ADDR16:
55
if (p == NULL) {
26
- /* We are abusing this relocation type. This points to a pair
27
- of insns, addis + load. If the displacement is small, we
28
- can nop out the addis. */
29
- if (value == (int16_t)value) {
30
- code_ptr[0] = NOP;
31
- old = deposit32(code_ptr[1], 0, 16, value);
32
- code_ptr[1] = deposit32(old, 16, 5, TCG_REG_TB);
33
- } else {
34
- int16_t lo = value;
35
- int hi = value - lo;
36
- if (hi + lo != value) {
37
- return false;
38
- }
39
- code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
40
- code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
41
+ /*
42
+ * We are (slightly) abusing this relocation type. In particular,
43
+ * assert that the low 2 bits are zero, and do not modify them.
44
+ * That way we can use this with LD et al that have opcode bits
45
+ * in the low 2 bits of the insn.
46
+ */
47
+ if ((value & 3) || value != (int16_t)value) {
48
+ return false;
49
}
50
+ *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
51
break;
52
default:
53
g_assert_not_reached();
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
55
if (!in_prologue && USE_REG_TB) {
56
new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
57
-(intptr_t)s->code_gen_ptr);
58
- tcg_out32(s, ADDIS | TAI(ret, TCG_REG_TB, 0));
59
- tcg_out32(s, LD | TAI(ret, ret, 0));
60
+ tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
61
return;
56
return;
62
}
57
}
58
+
59
+ start = addr & TARGET_PAGE_MASK;
60
+ end = start + TARGET_PAGE_SIZE;
61
pages = page_collection_lock(start, end);
62
tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
63
page_collection_unlock(pages);
64
diff --git a/cpu.c b/cpu.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/cpu.c
67
+++ b/cpu.c
68
@@ -XXX,XX +XXX,XX @@ void list_cpus(const char *optarg)
69
void tb_invalidate_phys_addr(target_ulong addr)
70
{
71
mmap_lock();
72
- tb_invalidate_phys_page_range(addr, addr + 1);
73
+ tb_invalidate_phys_page(addr);
74
mmap_unlock();
75
}
76
#else
77
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
78
return;
79
}
80
ram_addr = memory_region_get_ram_addr(mr) + addr;
81
- tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
82
+ tb_invalidate_phys_page(ram_addr);
83
}
84
#endif
63
85
64
--
86
--
65
2.17.1
87
2.34.1
66
88
67
89
New patch
1
We missed this function when we introduced tb_page_addr_t.
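
For context, tb_page_addr_t is the build-dependent physical-page address
type; a sketch of its existing definition in include/exec/exec-all.h
(quoted from memory, so treat as illustrative):

    #if defined(CONFIG_USER_ONLY)
    typedef abi_ulong tb_page_addr_t;
    #else
    typedef ram_addr_t tb_page_addr_t;
    #endif

One prototype then serves both builds, replacing the two ifdef'ed
declarations removed below.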
1
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/exec/exec-all.h | 2 +-
7
include/exec/ram_addr.h | 2 --
8
accel/tcg/tb-maint.c | 13 ++-----------
9
3 files changed, 3 insertions(+), 14 deletions(-)
10
11
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/exec-all.h
14
+++ b/include/exec/exec-all.h
15
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu);
16
/* TranslationBlock invalidate API */
17
#if defined(CONFIG_USER_ONLY)
18
void tb_invalidate_phys_addr(target_ulong addr);
19
-void tb_invalidate_phys_range(target_ulong start, target_ulong end);
20
#else
21
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
22
#endif
23
void tb_flush(CPUState *cpu);
24
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
25
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end);
26
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
27
28
/* GETPC is the true target of the return instruction that we'll execute. */
29
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/include/exec/ram_addr.h
32
+++ b/include/exec/ram_addr.h
33
@@ -XXX,XX +XXX,XX @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
34
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
35
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
36
37
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);
38
-
39
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
40
ram_addr_t length,
41
unsigned client)
42
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/accel/tcg/tb-maint.c
45
+++ b/accel/tcg/tb-maint.c
46
@@ -XXX,XX +XXX,XX @@
47
#include "qemu/osdep.h"
48
#include "exec/cputlb.h"
49
#include "exec/log.h"
50
+#include "exec/exec-all.h"
51
#include "exec/translate-all.h"
52
#include "sysemu/tcg.h"
53
#include "tcg/tcg.h"
54
@@ -XXX,XX +XXX,XX @@
55
#include "tb-context.h"
56
#include "internal.h"
57
58
-/* FIXME: tb_invalidate_phys_range is declared in different places. */
59
-#ifdef CONFIG_USER_ONLY
60
-#include "exec/exec-all.h"
61
-#else
62
-#include "exec/ram_addr.h"
63
-#endif
64
65
static bool tb_cmp(const void *ap, const void *bp)
66
{
67
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
68
*
69
* Called with mmap_lock held for user-mode emulation.
70
*/
71
-#ifdef CONFIG_SOFTMMU
72
-void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end)
73
-#else
74
-void tb_invalidate_phys_range(target_ulong start, target_ulong end)
75
-#endif
76
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
77
{
78
struct page_collection *pages;
79
tb_page_addr_t next;
80
--
81
2.34.1
82
83
New patch
1
We do not require detection of overlapping TBs here,
2
so use the more appropriate function.
1
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
accel/tcg/translate-all.c | 2 +-
8
1 file changed, 1 insertion(+), 1 deletion(-)
9
10
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/translate-all.c
13
+++ b/accel/tcg/translate-all.c
14
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
15
if (!(p->flags & PAGE_WRITE) &&
16
(flags & PAGE_WRITE) &&
17
p->first_tb) {
18
- tb_invalidate_phys_page_unwind(addr, 0);
19
+ tb_invalidate_phys_page(addr);
20
}
21
if (reset_target_data) {
22
g_free(p->target_data);
23
--
24
2.34.1
25
26
New patch
1
When PAGE_RESET is set, we are replacing pages with new
2
content, which means that we need to invalidate existing
3
cached data, such as TranslationBlocks. Perform the
4
reset invalidate while we're doing other invalidates,
5
which allows us to remove the separate invalidates from
6
the user-only mmap/munmap/mprotect routines.
1
7
8
In addition, restrict invalidation to PAGE_EXEC pages.
9
Since cdf713085131, we have validated PAGE_EXEC is present
10
before translation, which means we can assume that if the
11
bit is not present, there are no translations to invalidate.
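
Restated as a standalone predicate, the new test in page_set_flags reads
as follows (a hedged paraphrase of the hunk below; the helper name is
illustrative, not part of the patch):

    /* Invalidate only if the page could have translations (PAGE_EXEC)
     * and is reset, loses PAGE_EXEC, or newly gains PAGE_WRITE. */
    static bool invalidate_needed(int old_flags, int new_flags, bool reset)
    {
        return (old_flags & PAGE_EXEC)
               && (reset
                   || !(new_flags & PAGE_EXEC)
                   || (new_flags & ~old_flags & PAGE_WRITE));
    }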
12
13
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
16
accel/tcg/translate-all.c | 19 +++++++++++--------
17
bsd-user/mmap.c | 2 --
18
linux-user/mmap.c | 4 ----
19
3 files changed, 11 insertions(+), 14 deletions(-)
20
21
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/accel/tcg/translate-all.c
24
+++ b/accel/tcg/translate-all.c
25
@@ -XXX,XX +XXX,XX @@ int page_get_flags(target_ulong address)
26
void page_set_flags(target_ulong start, target_ulong end, int flags)
27
{
28
target_ulong addr, len;
29
- bool reset_target_data;
30
+ bool reset;
31
32
/* This function should never be called with addresses outside the
33
guest address space. If this assert fires, it probably indicates
34
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
35
if (flags & PAGE_WRITE) {
36
flags |= PAGE_WRITE_ORG;
37
}
38
- reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
39
+ reset = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
40
flags &= ~PAGE_RESET;
41
42
for (addr = start, len = end - start;
43
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
44
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
45
PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);
46
47
- /* If the write protection bit is set, then we invalidate
48
- the code inside. */
49
- if (!(p->flags & PAGE_WRITE) &&
50
- (flags & PAGE_WRITE) &&
51
- p->first_tb) {
52
+ /*
53
+ * If the page was executable, but is reset, or is no longer
54
+ * executable, or has become writable, then invalidate any code.
55
+ */
56
+ if ((p->flags & PAGE_EXEC)
57
+ && (reset ||
58
+ !(flags & PAGE_EXEC) ||
59
+ (flags & ~p->flags & PAGE_WRITE))) {
60
tb_invalidate_phys_page(addr);
61
}
62
- if (reset_target_data) {
63
+ if (reset) {
64
g_free(p->target_data);
65
p->target_data = NULL;
66
p->flags = flags;
67
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/bsd-user/mmap.c
70
+++ b/bsd-user/mmap.c
71
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
72
page_dump(stdout);
73
printf("\n");
74
#endif
75
- tb_invalidate_phys_range(start, start + len);
76
mmap_unlock();
77
return start;
78
fail:
79
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
80
81
if (ret == 0) {
82
page_set_flags(start, start + len, 0);
83
- tb_invalidate_phys_range(start, start + len);
84
}
85
mmap_unlock();
86
return ret;
87
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/linux-user/mmap.c
90
+++ b/linux-user/mmap.c
91
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
92
}
93
94
page_set_flags(start, start + len, page_flags);
95
- tb_invalidate_phys_range(start, start + len);
96
ret = 0;
97
98
error:
99
@@ -XXX,XX +XXX,XX @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
100
qemu_log_unlock(f);
101
}
102
}
103
- tb_invalidate_phys_range(start, start + len);
104
mmap_unlock();
105
return start;
106
fail:
107
@@ -XXX,XX +XXX,XX @@ int target_munmap(abi_ulong start, abi_ulong len)
108
109
if (ret == 0) {
110
page_set_flags(start, start + len, 0);
111
- tb_invalidate_phys_range(start, start + len);
112
}
113
mmap_unlock();
114
return ret;
115
@@ -XXX,XX +XXX,XX @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
116
page_set_flags(new_addr, new_addr + new_size,
117
prot | PAGE_VALID | PAGE_RESET);
118
}
119
- tb_invalidate_phys_range(new_addr, new_addr + new_size);
120
mmap_unlock();
121
return new_addr;
122
}
123
--
124
2.34.1
125
126
1
Use the existing function for clearing target data.
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
5
---
3
tcg/tcg-op.c | 47 ++++++++++++++++++++++++-----------------------
6
accel/tcg/translate-all.c | 13 +++++--------
4
1 file changed, 24 insertions(+), 23 deletions(-)
7
1 file changed, 5 insertions(+), 8 deletions(-)
5
8
6
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
9
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
7
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
8
--- a/tcg/tcg-op.c
11
--- a/accel/tcg/translate-all.c
9
+++ b/tcg/tcg-op.c
12
+++ b/accel/tcg/translate-all.c
10
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
13
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
11
tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
14
flags |= PAGE_WRITE_ORG;
12
tcg_gen_movi_i32(TCGV_LOW(ret), 0);
15
}
16
reset = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
17
+ if (reset) {
18
+ page_reset_target_data(start, end);
19
+ }
20
flags &= ~PAGE_RESET;
21
22
for (addr = start, len = end - start;
23
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
24
(flags & ~p->flags & PAGE_WRITE))) {
25
tb_invalidate_phys_page(addr);
13
}
26
}
14
- } else {
27
- if (reset) {
15
- TCGv_i32 t0, t1;
28
- g_free(p->target_data);
16
-
29
- p->target_data = NULL;
17
- t0 = tcg_temp_new_i32();
30
- p->flags = flags;
18
- t1 = tcg_temp_new_i32();
31
- } else {
19
- if (right) {
32
- /* Using mprotect on a page does not change sticky bits. */
20
- tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
33
- p->flags = (p->flags & PAGE_STICKY) | flags;
21
- if (arith) {
34
- }
22
- tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
35
+ /* Using mprotect on a page does not change sticky bits. */
23
- } else {
36
+ p->flags = (reset ? 0 : p->flags & PAGE_STICKY) | flags;
24
- tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
25
- }
26
- tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
27
- tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
28
- tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
29
+ } else if (right) {
30
+ if (TCG_TARGET_HAS_extract2_i32) {
31
+ tcg_gen_extract2_i32(TCGV_LOW(ret),
32
+ TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
33
} else {
34
- tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
35
- /* Note: ret can be the same as arg1, so we use t1 */
36
- tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
37
- tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
38
- tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
39
- tcg_gen_mov_i32(TCGV_LOW(ret), t1);
40
+ tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
41
+ tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(ret),
42
+ TCGV_HIGH(arg1), 32 - c, c);
43
}
44
- tcg_temp_free_i32(t0);
45
- tcg_temp_free_i32(t1);
46
+ if (arith) {
47
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
48
+ } else {
49
+ tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
50
+ }
51
+ } else {
52
+ if (TCG_TARGET_HAS_extract2_i32) {
53
+ tcg_gen_extract2_i32(TCGV_HIGH(ret),
54
+ TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
55
+ } else {
56
+ TCGv_i32 t0 = tcg_temp_new_i32();
57
+ tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
58
+ tcg_gen_deposit_i32(TCGV_HIGH(ret), t0,
59
+ TCGV_HIGH(arg1), c, 32 - c);
60
+ tcg_temp_free_i32(t0);
61
+ }
62
+ tcg_gen_shli_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
63
}
37
}
64
}
38
}
65
39
66
--
40
--
67
2.17.1
41
2.34.1
68
42
69
43
1
If a TB generates too much code, try again with fewer insns.
1
Flush translation blocks in bulk, rather than page-by-page.
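
The shape of the change, condensed (the loop body is elided, and
page_needs_invalidate stands in for the PAGE_EXEC test added in the
previous patch; the other names follow the hunk below):

    bool inval_tb = false;

    for (addr = start, len = end - start; len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        /* ... per-page flag updates ... */
        if (page_needs_invalidate) {
            inval_tb = true;    /* defer; no per-page invalidate here */
        }
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, end);    /* one bulk flush */
    }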
2
2
3
Fixes: https://bugs.launchpad.net/bugs/1824853
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/translate-all.c | 38 ++++++++++++++++++++++++++++++++------
6
accel/tcg/translate-all.c | 8 ++++++--
8
tcg/tcg.c | 4 ++++
7
1 file changed, 6 insertions(+), 2 deletions(-)
9
2 files changed, 36 insertions(+), 6 deletions(-)
10
8
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
9
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
11
--- a/accel/tcg/translate-all.c
14
+++ b/accel/tcg/translate-all.c
12
+++ b/accel/tcg/translate-all.c
15
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
13
@@ -XXX,XX +XXX,XX @@ int page_get_flags(target_ulong address)
16
tb->cflags = cflags;
14
void page_set_flags(target_ulong start, target_ulong end, int flags)
17
tb->trace_vcpu_dstate = *cpu->trace_dstate;
15
{
18
tcg_ctx->tb_cflags = cflags;
16
target_ulong addr, len;
19
+ tb_overflow:
17
- bool reset;
20
18
+ bool reset, inval_tb = false;
21
#ifdef CONFIG_PROFILER
19
22
/* includes aborted translations because of exceptions */
20
/* This function should never be called with addresses outside the
23
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
21
guest address space. If this assert fires, it probably indicates
24
ti = profile_getclock();
22
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
25
#endif
23
&& (reset ||
26
24
!(flags & PAGE_EXEC) ||
27
- /* ??? Overflow could be handled better here. In particular, we
25
(flags & ~p->flags & PAGE_WRITE))) {
28
- don't need to re-do gen_intermediate_code, nor should we re-do
26
- tb_invalidate_phys_page(addr);
29
- the tcg optimization currently hidden inside tcg_gen_code. All
27
+ inval_tb = true;
30
- that should be required is to flush the TBs, allocate a new TB,
28
}
31
- re-initialize it per above, and re-do the actual code generation. */
29
/* Using mprotect on a page does not change sticky bits. */
32
gen_code_size = tcg_gen_code(tcg_ctx, tb);
30
p->flags = (reset ? 0 : p->flags & PAGE_STICKY) | flags;
33
if (unlikely(gen_code_size < 0)) {
31
}
34
- goto buffer_overflow;
35
+ switch (gen_code_size) {
36
+ case -1:
37
+ /*
38
+ * Overflow of code_gen_buffer, or the current slice of it.
39
+ *
40
+ * TODO: We don't need to re-do gen_intermediate_code, nor
41
+ * should we re-do the tcg optimization currently hidden
42
+ * inside tcg_gen_code. All that should be required is to
43
+ * flush the TBs, allocate a new TB, re-initialize it per
44
+ * above, and re-do the actual code generation.
45
+ */
46
+ goto buffer_overflow;
47
+
32
+
48
+ case -2:
33
+ if (inval_tb) {
49
+ /*
34
+ tb_invalidate_phys_range(start, end);
50
+ * The code generated for the TranslationBlock is too large.
35
+ }
51
+ * The maximum size allowed by the unwind info is 64k.
36
}
52
+ * There may be stricter constraints from relocations
37
53
+ * in the tcg backend.
38
void page_reset_target_data(target_ulong start, target_ulong end)
54
+ *
55
+ * Try again with half as many insns as we attempted this time.
56
+ * If a single insn overflows, there's a bug somewhere...
57
+ */
58
+ max_insns = tb->icount;
59
+ assert(max_insns > 1);
60
+ max_insns /= 2;
61
+ goto tb_overflow;
62
+
63
+ default:
64
+ g_assert_not_reached();
65
+ }
66
}
67
search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
68
if (unlikely(search_size < 0)) {
69
diff --git a/tcg/tcg.c b/tcg/tcg.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/tcg.c
72
+++ b/tcg/tcg.c
73
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
74
if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
75
return -1;
76
}
77
+ /* Test for TB overflow, as seen by gen_insn_end_off. */
78
+ if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
79
+ return -2;
80
+ }
81
}
82
tcg_debug_assert(num_insns >= 0);
83
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
84
--
39
--
85
2.17.1
40
2.34.1
86
41
87
42
This is part c of relocation overflow handling.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.inc.c | 16 ++++++++++------
 tcg/arm/tcg-target.inc.c | 16 ++++++++++------
 tcg/i386/tcg-target.inc.c | 6 ++++--
 tcg/mips/tcg-target.inc.c | 6 ++++--
 tcg/ppc/tcg-target.inc.c | 14 ++++++++++----
 tcg/riscv/tcg-target.inc.c | 16 ++++++++++++----
 tcg/s390/tcg-target.inc.c | 20 ++++++++++++--------
 tcg/tcg-ldst.inc.c | 18 +++++++++---------
 tcg/tcg.c | 7 ++++---
 9 files changed, 75 insertions(+), 44 deletions(-)

diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
     tcg_out_insn(s, 3406, ADR, rd, offset);
 }

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp size = opc & MO_SIZE;

-    bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }

     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }

     tcg_out_goto(s, lb->raddr);
+    return true;
 }

-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp size = opc & MO_SIZE;

-    bool ok = reloc_pc19(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc19(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }

     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_adr(s, TCG_REG_X4, lb->raddr);
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
     tcg_out_goto(s, lb->raddr);
+    return true;
 }

 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = label_ptr;
 }

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     void *func;

-    bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }

     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }

     tcg_out_goto(s, COND_AL, lb->raddr);
+    return true;
 }

-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);

-    bool ok = reloc_pc24(lb->label_ptr[0], s->code_ptr);
-    tcg_debug_assert(ok);
+    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }

     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)

     /* Tail-call to the helper, which will return to the fast path.  */
     tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    return true;
 }
 #endif /* SOFTMMU */

diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
 /*
  * Generate code for the slow path for a load at the end of block
  */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)

     /* Jump to the code corresponding to next IR of qemu_st */
     tcg_out_jmp(s, l->raddr);
+    return true;
 }

 /*
  * Generate code for the slow path for a store at the end of block
  */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     /* "Tail call" to the helper, with the return address back inline. */
     tcg_out_push(s, retaddr);
     tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    return true;
 }
 #elif TCG_TARGET_REG_BITS == 32
 # define x86_guest_base_seg 0
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
     }
 }

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     } else {
         tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
     }
+    return true;
 }

-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true);
     /* delay slot */
     tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
+    return true;
 }
 #endif

diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = lptr;
 }

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGReg hi, lo, arg = TCG_REG_R3;

-    **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
+    if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }

     tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     }

     tcg_out_b(s, 0, lb->raddr);
+    return true;
 }

-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     TCGReg hi, lo, arg = TCG_REG_R3;

-    **lb->label_ptr |= reloc_pc14_val(*lb->label_ptr, s->code_ptr);
+    if (!reloc_pc14(lb->label_ptr[0], s->code_ptr)) {
+        return false;
+    }

     tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

     tcg_out_b(s, 0, lb->raddr);
+    return true;
 }
 #endif /* SOFTMMU */
diff --git a/tcg/riscv/tcg-target.inc.c b/tcg/riscv/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.inc.c
+++ b/tcg/riscv/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = label_ptr[0];
 }

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     }

     /* resolve label address */
-    patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);
+    if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
+                     (intptr_t) s->code_ptr, 0)) {
+        return false;
+    }

     /* call load helper */
     tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

     tcg_out_goto(s, l->raddr);
+    return true;
 }

-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
     TCGMemOpIdx oi = l->oi;
     TCGMemOp opc = get_memop(oi);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     }

     /* resolve label address */
-    patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);
+    if (!patch_reloc(l->label_ptr[0], R_RISCV_BRANCH,
+                     (intptr_t) s->code_ptr, 0)) {
+        return false;
+    }

     /* call store helper */
     tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);

     tcg_out_goto(s, l->raddr);
+    return true;
 }
 #endif /* CONFIG_SOFTMMU */

diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -XXX,XX +XXX,XX @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
     label->label_ptr[0] = label_ptr;
 }

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg addr_reg = lb->addrlo_reg;
     TCGReg data_reg = lb->datalo_reg;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);

-    bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
-                          (intptr_t)s->code_ptr, 2);
-    tcg_debug_assert(ok);
+    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
+                     (intptr_t)s->code_ptr, 2)) {
+        return false;
+    }

     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);

     tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
+    return true;
 }

-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg addr_reg = lb->addrlo_reg;
     TCGReg data_reg = lb->datalo_reg;
     TCGMemOpIdx oi = lb->oi;
     TCGMemOp opc = get_memop(oi);

-    bool ok = patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
-                          (intptr_t)s->code_ptr, 2);
-    tcg_debug_assert(ok);
+    if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
+                     (intptr_t)s->code_ptr, 2)) {
+        return false;
+    }

     tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

     tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
+    return true;
 }
 #else
 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
diff --git a/tcg/tcg-ldst.inc.c b/tcg/tcg-ldst.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-ldst.inc.c
+++ b/tcg/tcg-ldst.inc.c
@@ -XXX,XX +XXX,XX @@ typedef struct TCGLabelQemuLdst {
  * Generate TB finalization at the end of block
  */

-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);

-static bool tcg_out_ldst_finalize(TCGContext *s)
+static int tcg_out_ldst_finalize(TCGContext *s)
 {
     TCGLabelQemuLdst *lb;

     /* qemu_ld/st slow paths */
     QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
-        if (lb->is_ld) {
-            tcg_out_qemu_ld_slow_path(s, lb);
-        } else {
-            tcg_out_qemu_st_slow_path(s, lb);
+        if (lb->is_ld
+            ? !tcg_out_qemu_ld_slow_path(s, lb)
+            : !tcg_out_qemu_st_slow_path(s, lb)) {
+            return -2;
         }

         /* Test for (pending) buffer overflow.  The assumption is that any
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_ldst_finalize(TCGContext *s)
            the buffer completely.  Thus we can test for overflow after
            generating code without having to check during generation. */
         if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
-            return false;
+            return -1;
         }
     }
-    return true;
+    return 0;
 }

 /*
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
 #ifdef TCG_TARGET_NEED_LDST_LABELS
-static bool tcg_out_ldst_finalize(TCGContext *s);
+static int tcg_out_ldst_finalize(TCGContext *s);
 #endif

 #define TCG_HIGHWATER 1024
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)

     /* Generate TB finalization at the end of block */
 #ifdef TCG_TARGET_NEED_LDST_LABELS
-    if (!tcg_out_ldst_finalize(s)) {
-        return -1;
+    i = tcg_out_ldst_finalize(s);
+    if (i < 0) {
+        return i;
     }
 #endif
 #ifdef TCG_TARGET_NEED_POOL_LABELS
--
2.17.1

Since "target data" is always user-only, move it out of
translate-all.c to user-exec.c.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/translate-all.c | 50 ---------------------------------------
 accel/tcg/user-exec.c | 50 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
         }
     }
 }

-void page_reset_target_data(target_ulong start, target_ulong end)
-{
-#ifdef TARGET_PAGE_DATA_SIZE
-    target_ulong addr, len;
-
-    /*
-     * This function should never be called with addresses outside the
-     * guest address space.  If this assert fires, it probably indicates
-     * a missing call to h2g_valid.
-     */
-    assert(end - 1 <= GUEST_ADDR_MAX);
-    assert(start < end);
-    assert_memory_lock();
-
-    start = start & TARGET_PAGE_MASK;
-    end = TARGET_PAGE_ALIGN(end);
-
-    for (addr = start, len = end - start;
-         len != 0;
-         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
-        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
-
-        g_free(p->target_data);
-        p->target_data = NULL;
-    }
-#endif
-}
-
-#ifdef TARGET_PAGE_DATA_SIZE
-void *page_get_target_data(target_ulong address)
-{
-    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
-    return p ? p->target_data : NULL;
-}
-
-void *page_alloc_target_data(target_ulong address)
-{
-    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
-    void *ret = NULL;
-
-    if (p->flags & PAGE_VALID) {
-        ret = p->target_data;
-        if (!ret) {
-            p->target_data = ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
-        }
-    }
-    return ret;
-}
-#endif /* TARGET_PAGE_DATA_SIZE */
-
 int page_check_range(target_ulong start, target_ulong len, int flags)
 {
     PageDesc *p;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
     return addr;
 }

+void page_reset_target_data(target_ulong start, target_ulong end)
+{
+#ifdef TARGET_PAGE_DATA_SIZE
+    target_ulong addr, len;
+
+    /*
+     * This function should never be called with addresses outside the
+     * guest address space.  If this assert fires, it probably indicates
+     * a missing call to h2g_valid.
+     */
+    assert(end - 1 <= GUEST_ADDR_MAX);
+    assert(start < end);
+    assert_memory_lock();
+
+    start = start & TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    for (addr = start, len = end - start;
+         len != 0;
+         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
+        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
+
+        g_free(p->target_data);
+        p->target_data = NULL;
+    }
+#endif
+}
+
+#ifdef TARGET_PAGE_DATA_SIZE
+void *page_get_target_data(target_ulong address)
+{
+    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
+    return p ? p->target_data : NULL;
+}
+
+void *page_alloc_target_data(target_ulong address)
+{
+    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
+    void *ret = NULL;
+
+    if (p->flags & PAGE_VALID) {
+        ret = p->target_data;
+        if (!ret) {
+            p->target_data = ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
+        }
+    }
+    return ret;
+}
+#endif /* TARGET_PAGE_DATA_SIZE */
+
 /* The softmmu versions of these helpers are in cputlb.c. */
--
2.34.1
This is part b of relocation overflow handling.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg-pool.inc.c | 12 +++++++-----
 tcg/tcg.c | 9 +++++----
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/tcg/tcg-pool.inc.c b/tcg/tcg-pool.inc.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-pool.inc.c
+++ b/tcg/tcg-pool.inc.c
@@ -XXX,XX +XXX,XX @@ static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label,
 /* To be provided by cpu/tcg-target.inc.c. */
 static void tcg_out_nop_fill(tcg_insn_unit *p, int count);

-static bool tcg_out_pool_finalize(TCGContext *s)
+static int tcg_out_pool_finalize(TCGContext *s)
 {
     TCGLabelPoolData *p = s->pool_labels;
     TCGLabelPoolData *l = NULL;
     void *a;

     if (p == NULL) {
-        return true;
+        return 0;
     }

     /* ??? Round up to qemu_icache_linesize, but then do not round
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_pool_finalize(TCGContext *s)
         size_t size = sizeof(tcg_target_ulong) * p->nlong;
         if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
             if (unlikely(a > s->code_gen_highwater)) {
-                return false;
+                return -1;
             }
             memcpy(a, p->data, size);
             a += size;
             l = p;
         }
-        patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend);
+        if (!patch_reloc(p->label, p->rtype, (intptr_t)a - size, p->addend)) {
+            return -2;
+        }
     }

     s->code_ptr = a;
-    return true;
+    return 0;
 }
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
 #ifdef TCG_TARGET_NEED_POOL_LABELS
     /* Allow the prologue to put e.g. guest_base into a pool entry. */
     {
-        bool ok = tcg_out_pool_finalize(s);
-        tcg_debug_assert(ok);
+        int result = tcg_out_pool_finalize(s);
+        tcg_debug_assert(result == 0);
     }
 #endif

@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
     }
 #endif
 #ifdef TCG_TARGET_NEED_POOL_LABELS
-    if (!tcg_out_pool_finalize(s)) {
-        return -1;
+    i = tcg_out_pool_finalize(s);
+    if (i < 0) {
+        return i;
     }
 #endif
     if (!tcg_resolve_relocs(s)) {
--
2.17.1

Since the only user, Arm MTE, always requires allocation,
merge the get and alloc functions to always produce a
non-null result.  Also assume that the user has already
checked page validity.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h | 21 ++++++---------------
 accel/tcg/user-exec.c | 16 ++++------------
 target/arm/mte_helper.c | 4 ----
 3 files changed, 10 insertions(+), 31 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ void page_reset_target_data(target_ulong start, target_ulong end);
 int page_check_range(target_ulong start, target_ulong len, int flags);

 /**
- * page_alloc_target_data(address)
+ * page_get_target_data(address)
  * @address: guest virtual address
  *
- * Allocate TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
- * with the guest page at @address.  If the page is not mapped, NULL will
- * be returned.  If there is existing data associated with @address,
- * no new memory will be allocated.
+ * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary.  The
+ * caller should already have verified that the address is valid.
  *
  * The memory will be freed when the guest page is deallocated,
  * e.g. with the munmap system call.
  */
-void *page_alloc_target_data(target_ulong address);
-
-/**
- * page_get_target_data(address)
- * @address: guest virtual address
- *
- * Return any out-of-bound memory assocated with the guest page
- * at @address, as per page_alloc_target_data.
- */
-void *page_get_target_data(target_ulong address);
+void *page_get_target_data(target_ulong address)
+    __attribute__((returns_nonnull));
 #endif

 CPUArchState *cpu_copy(CPUArchState *env);
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ void page_reset_target_data(target_ulong start, target_ulong end)
 void *page_get_target_data(target_ulong address)
 {
     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
-    return p ? p->target_data : NULL;
-}
+    void *ret = p->target_data;

-void *page_alloc_target_data(target_ulong address)
-{
-    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
-    void *ret = NULL;
-
-    if (p->flags & PAGE_VALID) {
-        ret = p->target_data;
-        if (!ret) {
-            p->target_data = ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
-        }
+    if (!ret) {
+        ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
+        p->target_data = ret;
     }
     return ret;
 }
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
     }

     tags = page_get_target_data(clean_ptr);
-    if (tags == NULL) {
-        tags = page_alloc_target_data(clean_ptr);
-        assert(tags != NULL);
-    }

     index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                       TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
--
2.34.1
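To see the merged contract in isolation, here is a small, self-contained sketch of the allocate-on-demand accessor pattern. PageDesc is reduced to a single field and get_target_data() merely imitates page_get_target_data(); only g_malloc0() and the returns_nonnull attribute are taken from the patch itself (builds with gcc `pkg-config --cflags --libs glib-2.0`):

#include <glib.h>

#define TARGET_PAGE_DATA_SIZE 64   /* placeholder size */

typedef struct {
    void *target_data;
} PageDesc;

/* Allocate-on-demand accessor: with page validity checked by the
 * caller, the result can be promised non-null, so callers may drop
 * their NULL checks (as the mte_helper.c hunk above does). */
static void *get_target_data(PageDesc *p) __attribute__((returns_nonnull));

static void *get_target_data(PageDesc *p)
{
    void *ret = p->target_data;

    if (!ret) {
        ret = g_malloc0(TARGET_PAGE_DATA_SIZE);
        p->target_data = ret;
    }
    return ret;
}

int main(void)
{
    PageDesc page = { NULL };
    unsigned char *tags = get_target_data(&page);  /* no NULL check needed */

    tags[0] = 0xf;
    g_free(page.target_data);
    return 0;
}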
New patch

Add a tcg_ops hook to replace the restore_state_to_opc
function call. Because these generic hooks cannot depend
on target-specific types, temporarily copy the current
target_ulong data[] into uint64_t d64[].

Reviewed-by: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 2 +-
 include/hw/core/tcg-cpu-ops.h | 11 +++++++++++
 accel/tcg/translate-all.c | 24 ++++++++++++++++++++++--
 3 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
 #endif

 void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
-                          target_ulong *data);
+                          target_ulong *data) __attribute__((weak));

 /**
  * cpu_restore_state:
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
      * function to restore all the state, and register it here.
      */
     void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
+    /**
+     * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
+     *
+     * This is called when we unwind state in the middle of a TB,
+     * usually before raising an exception.  Set all part of the CPU
+     * state which are tracked insn-by-insn in the target-specific
+     * arguments to start_insn, passed as @data.
+     */
+    void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
+                                 const uint64_t *data);
+
     /** @cpu_exec_enter: Callback for cpu_exec preparation */
     void (*cpu_exec_enter)(CPUState *cpu);
     /** @cpu_exec_exit: Callback for cpu_exec cleanup */
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 {
     target_ulong data[TARGET_INSN_START_WORDS];
     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
-    CPUArchState *env = cpu->env_ptr;
     const uint8_t *p = tb->tc.ptr + tb->tc.size;
     int i, j, num_insns = tb->icount;
 #ifdef CONFIG_PROFILER
@@ -XXX,XX +XXX,XX @@ int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
        and shift if to the number of actually executed instructions */
         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
     }
-    restore_state_to_opc(env, tb, data);
+
+    {
+        const struct TCGCPUOps *ops = cpu->cc->tcg_ops;
+        __typeof(ops->restore_state_to_opc) restore = ops->restore_state_to_opc;
+        if (restore) {
+            uint64_t d64[TARGET_INSN_START_WORDS];
+            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
+                d64[i] = data[i];
+            }
+            restore(cpu, tb, d64);
+        } else {
+            restore_state_to_opc(cpu->env_ptr, tb, data);
+        }
+    }

 #ifdef CONFIG_PROFILER
     qatomic_set(&prof->restore_time,
@@ -XXX,XX +XXX,XX @@ int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,

 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
 {
+    /*
+     * The pc update associated with restore without exit will
+     * break the relative pc adjustments performed by TARGET_TB_PCREL.
+     */
+    if (TARGET_TB_PCREL) {
+        assert(will_exit);
+    }
+
     /*
      * The host_pc has to be in the rx region of the code buffer.
      * If it is not we will not be able to resolve it here.
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/cpu.c | 9 +++++++++
 target/alpha/translate.c | 6 ------
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static vaddr alpha_cpu_get_pc(CPUState *cs)
     return cpu->env.pc;
 }

+static void alpha_restore_state_to_opc(CPUState *cs,
+                                       const TranslationBlock *tb,
+                                       const uint64_t *data)
+{
+    AlphaCPU *cpu = ALPHA_CPU(cs);
+
+    cpu->env.pc = data[0];
+}

 static bool alpha_cpu_has_work(CPUState *cs)
 {
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {

 static const struct TCGCPUOps alpha_tcg_ops = {
     .initialize = alpha_translate_init,
+    .restore_state_to_opc = alpha_restore_state_to_opc,

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = alpha_cpu_record_sigsegv,
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
 }
-
-void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.c | 26 ++++++++++++++++++++++++++
 target/arm/translate.c | 22 ----------------------
 2 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ void arm_cpu_synchronize_from_tb(CPUState *cs,
         }
     }
 }
+
+static void arm_restore_state_to_opc(CPUState *cs,
+                                     const TranslationBlock *tb,
+                                     const uint64_t *data)
+{
+    CPUARMState *env = cs->env_ptr;
+
+    if (is_a64(env)) {
+        if (TARGET_TB_PCREL) {
+            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
+        } else {
+            env->pc = data[0];
+        }
+        env->condexec_bits = 0;
+        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
+    } else {
+        if (TARGET_TB_PCREL) {
+            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
+        } else {
+            env->regs[15] = data[0];
+        }
+        env->condexec_bits = data[1];
+        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
+    }
+}
 #endif /* CONFIG_TCG */

 static bool arm_cpu_has_work(CPUState *cs)
@@ -XXX,XX +XXX,XX @@ static const struct TCGCPUOps arm_tcg_ops = {
     .initialize = arm_translate_init,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
     .debug_excp_handler = arm_debug_excp_handler,
+    .restore_state_to_opc = arm_restore_state_to_opc,

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = arm_cpu_record_sigsegv,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,

     translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
 }
-
-void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    if (is_a64(env)) {
-        if (TARGET_TB_PCREL) {
-            env->pc = (env->pc & TARGET_PAGE_MASK) | data[0];
-        } else {
-            env->pc = data[0];
-        }
-        env->condexec_bits = 0;
-        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
-    } else {
-        if (TARGET_TB_PCREL) {
-            env->regs[15] = (env->regs[15] & TARGET_PAGE_MASK) | data[0];
-        } else {
-            env->regs[15] = data[0];
-        }
-        env->condexec_bits = data[1];
-        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
-    }
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/cpu.c | 11 +++++++++++
 target/avr/translate.c | 6 ------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_synchronize_from_tb(CPUState *cs,
     env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */
 }

+static void avr_restore_state_to_opc(CPUState *cs,
+                                     const TranslationBlock *tb,
+                                     const uint64_t *data)
+{
+    AVRCPU *cpu = AVR_CPU(cs);
+    CPUAVRState *env = &cpu->env;
+
+    env->pc_w = data[0];
+}
+
 static void avr_cpu_reset(DeviceState *ds)
 {
     CPUState *cs = CPU(ds);
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
 static const struct TCGCPUOps avr_tcg_ops = {
     .initialize = avr_cpu_tcg_init,
     .synchronize_from_tb = avr_cpu_synchronize_from_tb,
+    .restore_state_to_opc = avr_restore_state_to_opc,
     .cpu_exec_interrupt = avr_cpu_exec_interrupt,
     .tlb_fill = avr_cpu_tlb_fill,
     .do_interrupt = avr_cpu_do_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
     DisasContext dc = { };
     translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
 }
-
-void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc_w = data[0];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/cris/cpu.c | 11 +++++++++++
 target/cris/translate.c | 6 ------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static vaddr cris_cpu_get_pc(CPUState *cs)
     return cpu->env.pc;
 }

+static void cris_restore_state_to_opc(CPUState *cs,
+                                      const TranslationBlock *tb,
+                                      const uint64_t *data)
+{
+    CRISCPU *cpu = CRIS_CPU(cs);
+
+    cpu->env.pc = data[0];
+}
+
 static bool cris_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps cris_sysemu_ops = {

 static const struct TCGCPUOps crisv10_tcg_ops = {
     .initialize = cris_initialize_crisv10_tcg,
+    .restore_state_to_opc = cris_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = cris_cpu_tlb_fill,
@@ -XXX,XX +XXX,XX @@ static const struct TCGCPUOps crisv10_tcg_ops = {

 static const struct TCGCPUOps crisv32_tcg_ops = {
     .initialize = cris_initialize_tcg,
+    .restore_state_to_opc = cris_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = cris_cpu_tlb_fill,
diff --git a/target/cris/translate.c b/target/cris/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -XXX,XX +XXX,XX @@ void cris_initialize_tcg(void)
                                   pregnames_v32[i]);
     }
 }
-
-void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/cpu.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static bool hexagon_cpu_has_work(CPUState *cs)
     return true;
 }

-void restore_state_to_opc(CPUHexagonState *env, TranslationBlock *tb,
-                          target_ulong *data)
+static void hexagon_restore_state_to_opc(CPUState *cs,
+                                         const TranslationBlock *tb,
+                                         const uint64_t *data)
 {
+    HexagonCPU *cpu = HEXAGON_CPU(cs);
+    CPUHexagonState *env = &cpu->env;
+
     env->gpr[HEX_REG_PC] = data[0];
 }

@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
 static const struct TCGCPUOps hexagon_tcg_ops = {
     .initialize = hexagon_translate_init,
     .synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
+    .restore_state_to_opc = hexagon_restore_state_to_opc,
 };

 static void hexagon_cpu_class_init(ObjectClass *c, void *data)
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hppa/cpu.c | 19 +++++++++++++++++++
 target/hppa/translate.c | 13 -------------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
     cpu->env.psw_n = (tb->flags & PSW_N) != 0;
 }

+static void hppa_restore_state_to_opc(CPUState *cs,
+                                      const TranslationBlock *tb,
+                                      const uint64_t *data)
+{
+    HPPACPU *cpu = HPPA_CPU(cs);
+
+    cpu->env.iaoq_f = data[0];
+    if (data[1] != (target_ureg)-1) {
+        cpu->env.iaoq_b = data[1];
+    }
+    /*
+     * Since we were executing the instruction at IAOQ_F, and took some
+     * sort of action that provoked the cpu_restore_state, we can infer
+     * that the instruction was not nullified.
+     */
+    cpu->env.psw_n = 0;
+}
+
 static bool hppa_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
 static const struct TCGCPUOps hppa_tcg_ops = {
     .initialize = hppa_translate_init,
     .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
+    .restore_state_to_opc = hppa_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = hppa_cpu_tlb_fill,
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
     DisasContext ctx;
     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
 }
-
-void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->iaoq_f = data[0];
-    if (data[1] != (target_ureg)-1) {
-        env->iaoq_b = data[1];
-    }
-    /* Since we were executing the instruction at IAOQ_F, and took some
-       sort of action that provoked the cpu_restore_state, we can infer
-       that the instruction was not nullified.  */
-    env->psw_n = 0;
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/i386/tcg/tcg-cpu.c | 19 +++++++++++++++++++
 target/i386/tcg/translate.c | 15 ---------------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
     }
 }

+static void x86_restore_state_to_opc(CPUState *cs,
+                                     const TranslationBlock *tb,
+                                     const uint64_t *data)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    int cc_op = data[1];
+
+    if (TARGET_TB_PCREL) {
+        env->eip = (env->eip & TARGET_PAGE_MASK) | data[0];
+    } else {
+        env->eip = data[0] - tb->cs_base;
+    }
+    if (cc_op != CC_OP_DYNAMIC) {
+        env->cc_op = cc_op;
+    }
+}
+
 #ifndef CONFIG_USER_ONLY
 static bool x86_debug_check_breakpoint(CPUState *cs)
 {
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
 static const struct TCGCPUOps x86_tcg_ops = {
     .initialize = tcg_x86_init,
     .synchronize_from_tb = x86_cpu_synchronize_from_tb,
+    .restore_state_to_opc = x86_restore_state_to_opc,
     .cpu_exec_enter = x86_cpu_exec_enter,
     .cpu_exec_exit = x86_cpu_exec_exit,
 #ifdef CONFIG_USER_ONLY
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,

     translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
 }
-
-void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    int cc_op = data[1];
-
-    if (TARGET_TB_PCREL) {
-        env->eip = (env->eip & TARGET_PAGE_MASK) | data[0];
-    } else {
-        env->eip = data[0] - tb->cs_base;
-    }
-    if (cc_op != CC_OP_DYNAMIC) {
-        env->cc_op = cc_op;
-    }
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/loongarch/cpu.c | 11 +++++++++++
 target/loongarch/translate.c | 6 ------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs,

     env->pc = tb_pc(tb);
 }
+
+static void loongarch_restore_state_to_opc(CPUState *cs,
+                                           const TranslationBlock *tb,
+                                           const uint64_t *data)
+{
+    LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+    CPULoongArchState *env = &cpu->env;
+
+    env->pc = data[0];
+}
 #endif /* CONFIG_TCG */

 static bool loongarch_cpu_has_work(CPUState *cs)
@@ -XXX,XX +XXX,XX @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 static struct TCGCPUOps loongarch_tcg_ops = {
     .initialize = loongarch_translate_init,
     .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
+    .restore_state_to_opc = loongarch_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = loongarch_cpu_tlb_fill,
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/translate.c
+++ b/target/loongarch/translate.c
@@ -XXX,XX +XXX,XX @@ void loongarch_translate_init(void)
     cpu_llval = tcg_global_mem_new(cpu_env,
                   offsetof(CPULoongArchState, llval), "llval");
 }
-
-void restore_state_to_opc(CPULoongArchState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/m68k/cpu.c | 14 ++++++++++++++
 target/m68k/translate.c | 10 ----------
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static vaddr m68k_cpu_get_pc(CPUState *cs)
     return cpu->env.pc;
 }

+static void m68k_restore_state_to_opc(CPUState *cs,
+                                      const TranslationBlock *tb,
+                                      const uint64_t *data)
+{
+    M68kCPU *cpu = M68K_CPU(cs);
+    int cc_op = data[1];
+
+    cpu->env.pc = data[0];
+    if (cc_op != CC_OP_DYNAMIC) {
+        cpu->env.cc_op = cc_op;
+    }
+}
+
 static bool m68k_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request & CPU_INTERRUPT_HARD;
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {

 static const struct TCGCPUOps m68k_tcg_ops = {
     .initialize = m68k_tcg_init,
+    .restore_state_to_opc = m68k_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = m68k_cpu_tlb_fill,
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
               env->mmu.mmusr, env->mmu.ar);
 #endif
 }
-
-void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    int cc_op = data[1];
-    env->pc = data[0];
-    if (cc_op != CC_OP_DYNAMIC) {
-        env->cc_op = cc_op;
-    }
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/microblaze/cpu.c | 11 +++++++++++
 target/microblaze/translate.c | 7 -------
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_synchronize_from_tb(CPUState *cs,
     cpu->env.iflags = tb->flags & IFLAGS_TB_MASK;
 }

+static void mb_restore_state_to_opc(CPUState *cs,
+                                    const TranslationBlock *tb,
+                                    const uint64_t *data)
+{
+    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+
+    cpu->env.pc = data[0];
+    cpu->env.iflags = data[1];
+}
+
 static bool mb_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
 static const struct TCGCPUOps mb_tcg_ops = {
     .initialize = mb_tcg_init,
     .synchronize_from_tb = mb_cpu_synchronize_from_tb,
+    .restore_state_to_opc = mb_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = mb_cpu_tlb_fill,
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ void mb_tcg_init(void)
     cpu_res_addr =
         tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
 }
-
-void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-    env->iflags = data[1];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/mips/tcg/tcg-internal.h | 3 +++
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 8 ++++++--
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
 G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                              MMUAccessType access_type, int mmu_idx,
                                              uintptr_t retaddr);
+void mips_restore_state_to_opc(CPUState *cs,
+                               const TranslationBlock *tb,
+                               const uint64_t *data);

 const char *mips_exception_name(int32_t exception);

diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mips_sysemu_ops = {
 static const struct TCGCPUOps mips_tcg_ops = {
     .initialize = mips_tcg_init,
     .synchronize_from_tb = mips_cpu_synchronize_from_tb,
+    .restore_state_to_opc = mips_restore_state_to_opc,

 #if !defined(CONFIG_USER_ONLY)
     .tlb_fill = mips_cpu_tlb_fill,
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ void mips_tcg_init(void)
     }
 }

-void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb,
-                          target_ulong *data)
+void mips_restore_state_to_opc(CPUState *cs,
+                               const TranslationBlock *tb,
+                               const uint64_t *data)
 {
+    MIPSCPU *cpu = MIPS_CPU(cs);
+    CPUMIPSState *env = &cpu->env;
+
     env->active_tc.PC = data[0];
     env->hflags &= ~MIPS_HFLAG_BMASK;
     env->hflags |= data[1];
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/nios2/cpu.c | 11 +++++++++++
 target/nios2/translate.c | 6 ------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static vaddr nios2_cpu_get_pc(CPUState *cs)
     return env->pc;
 }

+static void nios2_restore_state_to_opc(CPUState *cs,
+                                       const TranslationBlock *tb,
+                                       const uint64_t *data)
+{
+    Nios2CPU *cpu = NIOS2_CPU(cs);
+    CPUNios2State *env = &cpu->env;
+
+    env->pc = data[0];
+}
+
 static bool nios2_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request & CPU_INTERRUPT_HARD;
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps nios2_sysemu_ops = {

 static const struct TCGCPUOps nios2_tcg_ops = {
     .initialize = nios2_tcg_init,
+    .restore_state_to_opc = nios2_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = nios2_cpu_tlb_fill,
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -XXX,XX +XXX,XX @@ void nios2_tcg_init(void)
     cpu_pc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUNios2State, pc), "pc");
 }
-
-void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/openrisc/cpu.c | 13 +++++++++++++
 target/openrisc/translate.c | 10 ----------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
     cpu->env.pc = tb_pc(tb);
 }

+static void openrisc_restore_state_to_opc(CPUState *cs,
+                                          const TranslationBlock *tb,
+                                          const uint64_t *data)
+{
+    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+
+    cpu->env.pc = data[0];
+    cpu->env.dflag = data[1] & 1;
+    if (data[1] & 2) {
+        cpu->env.ppc = cpu->env.pc - 4;
+    }
+}

 static bool openrisc_cpu_has_work(CPUState *cs)
 {
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
 static const struct TCGCPUOps openrisc_tcg_ops = {
     .initialize = openrisc_translate_init,
     .synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
+    .restore_state_to_opc = openrisc_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = openrisc_cpu_tlb_fill,
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                      (i % 4) == 3 ? '\n' : ' ');
     }
 }
-
-void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-    env->dflag = data[1] & 1;
-    if (data[1] & 2) {
-        env->ppc = env->pc - 4;
-    }
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/cpu_init.c | 10 ++++++++++
 target/ppc/translate.c | 6 ------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static vaddr ppc_cpu_get_pc(CPUState *cs)
     return cpu->env.nip;
 }

+static void ppc_restore_state_to_opc(CPUState *cs,
+                                     const TranslationBlock *tb,
+                                     const uint64_t *data)
+{
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+    cpu->env.nip = data[0];
+}
+
 static bool ppc_cpu_has_work(CPUState *cs)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

 static const struct TCGCPUOps ppc_tcg_ops = {
     .initialize = ppc_translate_init,
+    .restore_state_to_opc = ppc_restore_state_to_opc,

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = ppc_cpu_record_sigsegv,
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,

     translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
 }
-
-void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->nip = data[0];
-}
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/cpu.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static bool riscv_cpu_has_work(CPUState *cs)
 #endif
 }

-void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
-                          target_ulong *data)
+static void riscv_restore_state_to_opc(CPUState *cs,
+                                       const TranslationBlock *tb,
+                                       const uint64_t *data)
 {
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
     RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
+
     if (xl == MXL_RV32) {
         env->pc = (int32_t)data[0];
     } else {
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
 static const struct TCGCPUOps riscv_tcg_ops = {
     .initialize = riscv_translate_init,
     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
+    .restore_state_to_opc = riscv_restore_state_to_opc,

 #ifndef CONFIG_USER_ONLY
     .tlb_fill = riscv_cpu_tlb_fill,
--
2.34.1
New patch

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/rx/cpu.c | 10 ++++++++++
 target/rx/translate.c | 6 ------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_synchronize_from_tb(CPUState *cs,
     cpu->env.pc = tb_pc(tb);
 }

+static void rx_restore_state_to_opc(CPUState *cs,
+                                    const TranslationBlock *tb,
+                                    const uint64_t *data)
+{
+    RXCPU *cpu = RX_CPU(cs);
+
+    cpu->env.pc = data[0];
+}
+
 static bool rx_cpu_has_work(CPUState *cs)
 {
     return cs->interrupt_request &
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
 static const struct TCGCPUOps rx_tcg_ops = {
     .initialize = rx_translate_init,
     .synchronize_from_tb = rx_cpu_synchronize_from_tb,
+    .restore_state_to_opc = rx_restore_state_to_opc,
     .tlb_fill = rx_cpu_tlb_fill,

 #ifndef CONFIG_USER_ONLY
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
     translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
 }

-void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
-
 #define ALLOC_REGISTER(sym, name) \
     cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                        offsetof(CPURXState, sym), name)
--
2.34.1
From: Shahab Vahedi <shahab.vahedi@gmail.com>

This change adapts io_readx() to its input access_type. Currently
io_readx() treats any memory access as a read, although it has an
input argument "MMUAccessType access_type". This results in:

  1) Calling the tlb_fill() only with MMU_DATA_LOAD
  2) Considering only entry->addr_read as the tlb_addr

Buglink: https://bugs.launchpad.net/qemu/+bug/1825359
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Shahab Vahedi <shahab.vahedi@gmail.com>
Message-Id: <20190420072236.12347-1-shahab.vahedi@gmail.com>
[rth: Remove assert; fix expression formatting.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
     CPUTLBEntry *entry;
     target_ulong tlb_addr;

-    tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+    tlb_fill(cpu, addr, size, access_type, mmu_idx, retaddr);

     entry = tlb_entry(env, mmu_idx, addr);
-    tlb_addr = entry->addr_read;
+    tlb_addr = (access_type == MMU_DATA_LOAD ?
+                entry->addr_read : entry->addr_code);
     if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
         /* RAM access */
         uintptr_t haddr = addr + entry->addend;
--
2.17.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/s390x-internal.h | 4 +++-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 7 +++++--
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,

 /* translate.c */
 void s390x_translate_init(void);
-
+void s390x_restore_state_to_opc(CPUState *cs,
+                                const TranslationBlock *tb,
+                                const uint64_t *data);

 /* sigp.c */
 int handle_sigp(CPUS390XState *env, uint8_t order, uint64_t r1, uint64_t r3);
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_full(DeviceState *dev)

 static const struct TCGCPUOps s390_tcg_ops = {
     .initialize = s390x_translate_init,
+    .restore_state_to_opc = s390x_restore_state_to_opc,

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = s390_cpu_record_sigsegv,
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
     translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
 }

-void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
-                          target_ulong *data)
+void s390x_restore_state_to_opc(CPUState *cs,
+                                const TranslationBlock *tb,
+                                const uint64_t *data)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     int cc_op = data[1];

     env->psw.addr = data[0];
--
2.34.1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sh4/cpu.c       | 16 ++++++++++++++++
 target/sh4/translate.c | 10 ----------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
     cpu->env.flags = tb->flags;
 }
 
+static void superh_restore_state_to_opc(CPUState *cs,
+                                        const TranslationBlock *tb,
+                                        const uint64_t *data)
+{
+    SuperHCPU *cpu = SUPERH_CPU(cs);
+
+    cpu->env.pc = data[0];
+    cpu->env.flags = data[1];
+    /*
+     * Theoretically delayed_pc should also be restored. In practice the
+     * branch instruction is re-executed after exception, so the delayed
+     * branch target will be recomputed.
+     */
+}
+
 #ifndef CONFIG_USER_ONLY
 static bool superh_io_recompile_replay_branch(CPUState *cs,
                                               const TranslationBlock *tb)
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
 static const struct TCGCPUOps superh_tcg_ops = {
     .initialize = sh4_translate_init,
     .synchronize_from_tb = superh_cpu_synchronize_from_tb,
+    .restore_state_to_opc = superh_restore_state_to_opc,
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = superh_cpu_tlb_fill,
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
 
     translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
 }
-
-void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-    env->flags = data[1];
-    /* Theoretically delayed_pc should also be restored. In practice the
-       branch instruction is re-executed after exception, so the delayed
-       branch target will be recomputed. */
-}
--
2.34.1

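A note on the data[] words consumed by these hooks: they are exactly the
operands the target records with tcg_gen_insn_start() while translating,
played back in order when an exception forces state restoration. As a
minimal sketch of the producing side, assuming a hypothetical "foo" target
with two insn-start words (pc and flags, as sh4 uses above; the names are
illustrative, not code from this series):

    static void foo_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
    {
        DisasContext *ctx = container_of(dcbase, DisasContext, base);

        /* Recorded here as (pc, flags); delivered later as data[0], data[1]. */
        tcg_gen_insn_start(ctx->base.pc_next, ctx->flags);
    }
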
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/cpu.h       | 3 +++
 target/sparc/cpu.c       | 1 +
 target/sparc/translate.c | 7 +++++--
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
 
 /* translate.c */
 void sparc_tcg_init(void);
+void sparc_restore_state_to_opc(CPUState *cs,
+                                const TranslationBlock *tb,
+                                const uint64_t *data);
 
 /* cpu-exec.c */
 
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
 static const struct TCGCPUOps sparc_tcg_ops = {
     .initialize = sparc_tcg_init,
     .synchronize_from_tb = sparc_cpu_synchronize_from_tb,
+    .restore_state_to_opc = sparc_restore_state_to_opc,
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = sparc_cpu_tlb_fill,
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ void sparc_tcg_init(void)
     }
 }
 
-void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
-                          target_ulong *data)
+void sparc_restore_state_to_opc(CPUState *cs,
+                                const TranslationBlock *tb,
+                                const uint64_t *data)
 {
+    SPARCCPU *cpu = SPARC_CPU(cs);
+    CPUSPARCState *env = &cpu->env;
     target_ulong pc = data[0];
     target_ulong npc = data[1];
 
--
2.34.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/tricore/cpu.c       | 11 +++++++++++
 target/tricore/translate.c |  6 ------
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs,
     env->PC = tb_pc(tb);
 }
 
+static void tricore_restore_state_to_opc(CPUState *cs,
+                                         const TranslationBlock *tb,
+                                         const uint64_t *data)
+{
+    TriCoreCPU *cpu = TRICORE_CPU(cs);
+    CPUTriCoreState *env = &cpu->env;
+
+    env->PC = data[0];
+}
+
 static void tricore_cpu_reset(DeviceState *dev)
 {
     CPUState *s = CPU(dev);
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
 static const struct TCGCPUOps tricore_tcg_ops = {
     .initialize = tricore_tcg_init,
     .synchronize_from_tb = tricore_cpu_synchronize_from_tb,
+    .restore_state_to_opc = tricore_restore_state_to_opc,
     .tlb_fill = tricore_cpu_tlb_fill,
 };
 
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                      &tricore_tr_ops, &ctx.base);
 }
 
-void
-restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
-                     target_ulong *data)
-{
-    env->PC = data[0];
-}
 /*
  *
  * Initialization
--
2.34.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/xtensa/cpu.c       | 10 ++++++++++
 target/xtensa/translate.c |  6 ------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static vaddr xtensa_cpu_get_pc(CPUState *cs)
     return cpu->env.pc;
 }
 
+static void xtensa_restore_state_to_opc(CPUState *cs,
+                                        const TranslationBlock *tb,
+                                        const uint64_t *data)
+{
+    XtensaCPU *cpu = XTENSA_CPU(cs);
+
+    cpu->env.pc = data[0];
+}
+
 static bool xtensa_cpu_has_work(CPUState *cs)
 {
 #ifndef CONFIG_USER_ONLY
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
 static const struct TCGCPUOps xtensa_tcg_ops = {
     .initialize = xtensa_translate_init,
     .debug_excp_handler = xtensa_breakpoint_handler,
+    .restore_state_to_opc = xtensa_restore_state_to_opc,
 
 #ifndef CONFIG_USER_ONLY
     .tlb_fill = xtensa_cpu_tlb_fill,
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     }
 }
 
-void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
-                          target_ulong *data)
-{
-    env->pc = data[0];
-}
-
 static void translate_abs(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
--
2.34.1

All targets have been updated. Use the tcg_ops target hook
exclusively, which allows the compat code to be removed.

Reviewed-by: Claudio Fontana <cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h   |  3 ---
 accel/tcg/translate-all.c | 16 ++--------------
 2 files changed, 2 insertions(+), 17 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
 #define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
 #endif
 
-void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
-                          target_ulong *data) __attribute__((weak));
-
 /**
  * cpu_restore_state:
  * @cpu: the vCPU state is to be restore to
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
 int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t searched_pc, bool reset_icount)
 {
-    target_ulong data[TARGET_INSN_START_WORDS];
+    uint64_t data[TARGET_INSN_START_WORDS];
     uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
     const uint8_t *p = tb->tc.ptr + tb->tc.size;
     int i, j, num_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
         cpu_neg(cpu)->icount_decr.u16.low += num_insns - i;
     }
 
-    {
-        const struct TCGCPUOps *ops = cpu->cc->tcg_ops;
-        __typeof(ops->restore_state_to_opc) restore = ops->restore_state_to_opc;
-        if (restore) {
-            uint64_t d64[TARGET_INSN_START_WORDS];
-            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
-                d64[i] = data[i];
-            }
-            restore(cpu, tb, d64);
-        } else {
-            restore_state_to_opc(cpu->env_ptr, tb, data);
-        }
-    }
+    cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
 
 #ifdef CONFIG_PROFILER
     qatomic_set(&prof->restore_time,
--
2.34.1
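
With the weak symbol gone, the whole contract now lives in the TCGCPUOps
hook. For reference, a minimal sketch of the shape every converted target
provides, using a hypothetical "foo" target (the names are placeholders,
not code from this series):

    static void foo_restore_state_to_opc(CPUState *cs,
                                         const TranslationBlock *tb,
                                         const uint64_t *data)
    {
        FOOCPU *cpu = FOO_CPU(cs);

        /* For most targets data[0] is the guest program counter. */
        cpu->env.pc = data[0];
    }

    static const struct TCGCPUOps foo_tcg_ops = {
        .initialize = foo_translate_init,
        .restore_state_to_opc = foo_restore_state_to_opc,
        /* ... remaining hooks ... */
    };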