The following changes since commit 390e8fc6b0e7b521c9eceb8dfe0958e141009ab9:

  Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging (2023-06-26 16:05:45 +0200)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230626

for you to fetch changes up to a0eaae08c7c6a59c185cf646b02f4167b2ac6ec0:

  accel/tcg: Renumber TLB_DISCARD_WRITE (2023-06-26 17:33:00 +0200)

----------------------------------------------------------------
accel/tcg: Replace target_ulong in some APIs
accel/tcg: Remove CONFIG_PROFILER
accel/tcg: Store some tlb flags in CPUTLBEntryFull
tcg: Issue memory barriers as required for the guest memory model
tcg: Fix temporary variable in tcg_gen_gvec_andcs

----------------------------------------------------------------
Alex Bennée (1):
      softfloat: use QEMU_FLATTEN to avoid mistaken isra inlining

Anton Johansson (11):
      accel: Replace target_ulong in tlb_*()
      accel/tcg/translate-all.c: Widen pc and cs_base
      target: Widen pc/cs_base in cpu_get_tb_cpu_state
      accel/tcg/cputlb.c: Widen CPUTLBEntry access functions
      accel/tcg/cputlb.c: Widen addr in MMULookupPageData
      accel/tcg/cpu-exec.c: Widen pc to vaddr
      accel/tcg: Widen pc to vaddr in CPUJumpCache
      accel: Replace target_ulong with vaddr in probe_*()
      accel/tcg: Replace target_ulong with vaddr in *_mmu_lookup()
      accel/tcg: Replace target_ulong with vaddr in translator_*()
      cpu: Replace target_ulong with hwaddr in tb_invalidate_phys_addr()

Fei Wu (1):
      accel/tcg: remove CONFIG_PROFILER

Max Chou (1):
      tcg: Fix temporary variable in tcg_gen_gvec_andcs

Richard Henderson (8):
      tests/plugin: Remove duplicate insn log from libinsn.so
      target/microblaze: Define TCG_GUEST_DEFAULT_MO
      tcg: Do not elide memory barriers for !CF_PARALLEL in system mode
      tcg: Add host memory barriers to cpu_ldst.h interfaces
      accel/tcg: Remove check_tcg_memory_orders_compatible
      accel/tcg: Store some tlb flags in CPUTLBEntryFull
      accel/tcg: Move TLB_WATCHPOINT to TLB_SLOW_FLAGS_MASK
      accel/tcg: Renumber TLB_DISCARD_WRITE

 meson.build | 2 -
 qapi/machine.json | 18 --
 accel/tcg/internal.h | 40 +++-
 accel/tcg/tb-hash.h | 12 +-
 accel/tcg/tb-jmp-cache.h | 2 +-
 include/exec/cpu-all.h | 27 ++-
 include/exec/cpu-defs.h | 10 +-
 include/exec/cpu_ldst.h | 10 +-
 include/exec/exec-all.h | 95 +++++----
 include/exec/translator.h | 6 +-
 include/hw/core/cpu.h | 1 +
 include/qemu/plugin-memory.h | 2 +-
 include/qemu/timer.h | 9 -
 include/tcg/tcg.h | 26 ---
 target/alpha/cpu.h | 4 +-
 target/arm/cpu.h | 4 +-
 target/avr/cpu.h | 4 +-
 target/cris/cpu.h | 4 +-
 target/hexagon/cpu.h | 4 +-
 target/hppa/cpu.h | 5 +-
 target/i386/cpu.h | 4 +-
 target/loongarch/cpu.h | 6 +-
 target/m68k/cpu.h | 4 +-
 target/microblaze/cpu.h | 7 +-
 target/mips/cpu.h | 4 +-
 target/nios2/cpu.h | 4 +-
 target/openrisc/cpu.h | 5 +-
 target/ppc/cpu.h | 8 +-
 target/riscv/cpu.h | 4 +-
 target/rx/cpu.h | 4 +-
 target/s390x/cpu.h | 4 +-
 target/sh4/cpu.h | 4 +-
 target/sparc/cpu.h | 4 +-
 target/tricore/cpu.h | 4 +-
 target/xtensa/cpu.h | 4 +-
 accel/stubs/tcg-stub.c | 6 +-
 accel/tcg/cpu-exec.c | 43 ++--
 accel/tcg/cputlb.c | 351 +++++++++++++++++--------------
 accel/tcg/monitor.c | 31 ---
 accel/tcg/tb-maint.c | 2 +-
 accel/tcg/tcg-accel-ops.c | 10 -
 accel/tcg/tcg-all.c | 39 +---
 accel/tcg/translate-all.c | 46 +---
 accel/tcg/translator.c | 10 +-
 accel/tcg/user-exec.c | 24 ++-
 cpu.c | 2 +-
 fpu/softfloat.c | 22 +-
 softmmu/runstate.c | 9 -
 target/arm/helper.c | 4 +-
 target/ppc/helper_regs.c | 4 +-
 target/riscv/cpu_helper.c | 4 +-
 tcg/tcg-op-gvec.c | 2 +-
 tcg/tcg-op-ldst.c | 2 +-
 tcg/tcg-op.c | 14 +-
 tcg/tcg.c | 214 -------------------
 tests/plugin/insn.c | 9 +-
 tests/qtest/qmp-cmd-test.c | 3 -
 hmp-commands-info.hx | 15 --
 meson_options.txt | 2 -
 scripts/meson-buildoptions.sh | 3 -
 tests/tcg/i386/Makefile.softmmu-target | 9 -
 tests/tcg/i386/Makefile.target | 6 -
 tests/tcg/x86_64/Makefile.softmmu-target | 9 -
 63 files changed, 469 insertions(+), 781 deletions(-)


v2: Fix target/loongarch printf formats for vaddr
    Include two more reviewed patches.
    This time with actual pull urls. :-/

r~

The following changes since commit db7aa99ef894e88fc5eedf02ca2579b8c344b2ec:

  Merge tag 'hw-misc-20250216' of https://github.com/philmd/qemu into staging (2025-02-16 20:48:06 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20250215-2

for you to fetch changes up to a39bdd0f4ba96fcbb6b5bcb6e89591d2b24f52eb:

  tcg: Remove TCG_TARGET_HAS_{br,set}cond2 from riscv and loongarch64 (2025-02-17 09:52:07 -0800)

----------------------------------------------------------------
tcg: Remove last traces of TCG_TARGET_NEED_POOL_LABELS
tcg: Cleanups after disallowing 64-on-32
tcg: Introduce constraint for zero register
tcg: Remove TCG_TARGET_HAS_{br,set}cond2 from riscv and loongarch64
tcg/i386: Use tcg_{high,unsigned}_cond in tcg_out_brcond2
linux-user: Move TARGET_SA_RESTORER out of generic/signal.h
linux-user: Fix alignment when unmapping excess reservation
target/sparc: Fix register selection for all F*TOx and FxTO* instructions
target/sparc: Fix gdbstub incorrectly handling registers f32-f62
target/sparc: fake UltraSPARC T1 PCR and PIC registers

----------------------------------------------------------------
Andreas Schwab (1):
      linux-user: Move TARGET_SA_RESTORER out of generic/signal.h

Artyom Tarasenko (1):
      target/sparc: fake UltraSPARC T1 PCR and PIC registers

Fabiano Rosas (1):
      elfload: Fix alignment when unmapping excess reservation

Mikael Szreder (2):
      target/sparc: Fix register selection for all F*TOx and FxTO* instructions
      target/sparc: Fix gdbstub incorrectly handling registers f32-f62

Richard Henderson (22):
      tcg: Remove last traces of TCG_TARGET_NEED_POOL_LABELS
      tcg: Remove TCG_OVERSIZED_GUEST
      tcg: Drop support for two address registers in gen_ldst
      tcg: Merge INDEX_op_qemu_*_{a32,a64}_*
      tcg/arm: Drop addrhi from prepare_host_addr
      tcg/i386: Drop addrhi from prepare_host_addr
      tcg/mips: Drop addrhi from prepare_host_addr
      tcg/ppc: Drop addrhi from prepare_host_addr
      tcg: Replace addr{lo,hi}_reg with addr_reg in TCGLabelQemuLdst
      plugins: Fix qemu_plugin_read_memory_vaddr parameters
      accel/tcg: Fix tlb_set_page_with_attrs, tlb_set_page
      target/loongarch: Use VADDR_PRIx for logging pc_next
      include/exec: Change vaddr to uintptr_t
      include/exec: Use uintptr_t in CPUTLBEntry
      tcg: Introduce the 'z' constraint for a hardware zero register
      tcg/aarch64: Use 'z' constraint
      tcg/loongarch64: Use 'z' constraint
      tcg/mips: Use 'z' constraint
      tcg/riscv: Use 'z' constraint
      tcg/sparc64: Use 'z' constraint
      tcg/i386: Use tcg_{high,unsigned}_cond in tcg_out_brcond2
      tcg: Remove TCG_TARGET_HAS_{br,set}cond2 from riscv and loongarch64

 include/exec/tlb-common.h | 10 +-
 include/exec/vaddr.h | 16 +-
 include/qemu/atomic.h | 18 +-
 include/tcg/oversized-guest.h | 23 ---
 include/tcg/tcg-opc.h | 28 +--
 include/tcg/tcg.h | 3 +-
 linux-user/aarch64/target_signal.h | 2 +
 linux-user/arm/target_signal.h | 2 +
 linux-user/generic/signal.h | 1 -
 linux-user/i386/target_signal.h | 2 +
 linux-user/m68k/target_signal.h | 1 +
 linux-user/microblaze/target_signal.h | 2 +
 linux-user/ppc/target_signal.h | 2 +
 linux-user/s390x/target_signal.h | 2 +
 linux-user/sh4/target_signal.h | 2 +
 linux-user/x86_64/target_signal.h | 2 +
 linux-user/xtensa/target_signal.h | 2 +
 tcg/aarch64/tcg-target-con-set.h | 12 +-
 tcg/aarch64/tcg-target.h | 2 +
 tcg/loongarch64/tcg-target-con-set.h | 15 +-
 tcg/loongarch64/tcg-target-con-str.h | 1 -
 tcg/loongarch64/tcg-target-has.h | 2 -
 tcg/loongarch64/tcg-target.h | 2 +
 tcg/mips/tcg-target-con-set.h | 26 +--
 tcg/mips/tcg-target-con-str.h | 1 -
 tcg/mips/tcg-target.h | 2 +
 tcg/riscv/tcg-target-con-set.h | 10 +-
 tcg/riscv/tcg-target-con-str.h | 1 -
 tcg/riscv/tcg-target-has.h | 2 -
 tcg/riscv/tcg-target.h | 2 +
 tcg/sparc64/tcg-target-con-set.h | 12 +-
 tcg/sparc64/tcg-target-con-str.h | 1 -
 tcg/sparc64/tcg-target.h | 3 +-
 tcg/tci/tcg-target.h | 1 -
 accel/tcg/cputlb.c | 32 +---
 accel/tcg/tcg-all.c | 9 +-
 linux-user/elfload.c | 4 +-
 plugins/api.c | 2 +-
 target/arm/ptw.c | 34 ----
 target/loongarch/tcg/translate.c | 2 +-
 target/riscv/cpu_helper.c | 13 +-
 target/sparc/gdbstub.c | 18 +-
 target/sparc/translate.c | 19 +++
 tcg/optimize.c | 21 +--
 tcg/tcg-op-ldst.c | 103 +++--------
 tcg/tcg.c | 97 +++++------
 tcg/tci.c | 119 +++----------
 docs/devel/multi-thread-tcg.rst | 1 -
 docs/devel/tcg-ops.rst | 4 +-
 target/loongarch/tcg/insn_trans/trans_atomic.c.inc | 2 +-
 target/sparc/insns.decode | 19 ++-
 tcg/aarch64/tcg-target.c.inc | 86 ++++------
 tcg/arm/tcg-target.c.inc | 114 ++++---------
 tcg/i386/tcg-target.c.inc | 190 +++++----------------
 tcg/loongarch64/tcg-target.c.inc | 72 +++-----
 tcg/mips/tcg-target.c.inc | 169 ++++++------------
 tcg/ppc/tcg-target.c.inc | 164 +++++-------------
 tcg/riscv/tcg-target.c.inc | 56 +++---
 tcg/s390x/tcg-target.c.inc | 40 ++---
 tcg/sparc64/tcg-target.c.inc | 45 ++---
 tcg/tci/tcg-target.c.inc | 60 ++-----
 61 files changed, 548 insertions(+), 1160 deletions(-)
 delete mode 100644 include/tcg/oversized-guest.h
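For readers following the v2 note above: the printf-format fix refers to the convention, used throughout the patches below, of printing guest virtual addresses with the VADDR_PRIx format macro rather than the per-target TARGET_FMT_lx. A minimal sketch of the idea, assuming vaddr is the fixed-width 64-bit type from include/exec/vaddr.h (the log_pc_next() helper is hypothetical, for illustration only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vaddr;      /* assumption: per include/exec/vaddr.h */
    #define VADDR_PRIx PRIx64    /* one format string works for every guest */

    /* Hypothetical helper: log an insn address the way the series does. */
    static void log_pc_next(vaddr pc_next)
    {
        printf("pc_next: 0x%" VADDR_PRIx "\n", pc_next);
    }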
From: Anton Johansson <anjo@rev.ng>

Replaces target_ulong with vaddr for guest virtual addresses in tlb_*()
functions and auxiliary structs.

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-2-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h | 4 +-
 include/exec/exec-all.h | 79 ++++++--------
 include/qemu/plugin-memory.h | 2 +-
 accel/stubs/tcg-stub.c | 2 +-
 accel/tcg/cputlb.c | 177 +++++++++++++++++------------------
 accel/tcg/tb-maint.c | 2 +-
 6 files changed, 131 insertions(+), 135 deletions(-)

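The whole patch is this one substitution applied across the tlb_*() family; the first hunk below is representative. As a compilable sketch of the before/after shape (assuming the vaddr typedef is 64 bits wide; CPUState is left opaque here):

    #include <stdint.h>

    typedef uint64_t vaddr;            /* assumption: include/exec/vaddr.h */
    typedef struct CPUState CPUState;  /* opaque for this sketch */

    /* Before: the width followed the guest's target_ulong (32 or 64 bits):
     *   void tlb_flush_page(CPUState *cpu, target_ulong addr);
     * After: guest virtual addresses always travel as 64-bit vaddr:
     */
    void tlb_flush_page(CPUState *cpu, vaddr addr);
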
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBDesc {
     * we must flush the entire tlb. The region is matched if
     * (addr & large_page_mask) == large_page_addr.
     */
-    target_ulong large_page_addr;
-    target_ulong large_page_mask;
+    vaddr large_page_addr;
+    vaddr large_page_mask;
    /* host time (in ns) at the beginning of the time window */
    int64_t window_begin_ns;
    /* maximum number of entries observed in the window */
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tlb_destroy(CPUState *cpu);
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
-void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush_page(CPUState *cpu, vaddr addr);
 /**
 * tlb_flush_page_all_cpus:
 * @cpu: src CPU of the flush
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr);
 /**
 * tlb_flush_page_all_cpus_synced:
 * @cpu: src CPU of the flush
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
 * the source vCPUs safe work is complete. This will depend on when
 * the guests translation ends the TB.
 */
-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
 /**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
@@ -XXX,XX +XXX,XX @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                               uint16_t idxmap);
 /**
 * tlb_flush_page_by_mmuidx_all_cpus:
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                        uint16_t idxmap);
 /**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
 * complete once the source vCPUs safe work is complete. This will
 * depend on when the guests translation ends the TB.
 */
-void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                               uint16_t idxmap);
 /**
 * tlb_flush_by_mmuidx:
@@ -XXX,XX +XXX,XX @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
 *
 * Similar to tlb_flush_page_mask, but with a bitmap of indexes.
 */
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                    uint16_t idxmap, unsigned bits);

 /* Similarly, with broadcast and syncing. */
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
                                             uint16_t idxmap, unsigned bits);
 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
-    (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
+    (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits);

 /**
 * tlb_flush_range_by_mmuidx
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
-void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
-                               target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+                               vaddr len, uint16_t idxmap,
                                unsigned bits);

 /* Similarly, with broadcast and syncing. */
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
-                                        target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr,
+                                        vaddr len, uint16_t idxmap,
                                         unsigned bits);
 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                               target_ulong addr,
-                                               target_ulong len,
+                                               vaddr addr,
+                                               vaddr len,
                                                uint16_t idxmap,
                                                unsigned bits);

@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
- * @vaddr: virtual address of the entry to add
+ * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
-void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                        CPUTLBEntryFull *full);

 /**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
- * @vaddr: virtual address of page to add entry for
+ * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
- * @vaddr to physical address @paddr) with the specified memory
+ * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                              hwaddr paddr, MemTxAttrs attrs,
-                             int prot, int mmu_idx, target_ulong size);
+                             int prot, int mmu_idx, vaddr size);
 /* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
                   hwaddr paddr, int prot,
-                  int mmu_idx, target_ulong size);
+                  int mmu_idx, vaddr size);
 #else
 static inline void tlb_init(CPUState *cpu)
 {
@@ -XXX,XX +XXX,XX @@ static inline void tlb_init(CPUState *cpu)
 static inline void tlb_destroy(CPUState *cpu)
 {
 }
-static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
 {
 }
-static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
 {
 }
-static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
-                                                  target_ulong addr)
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
 {
 }
 static inline void tlb_flush(CPUState *cpu)
@@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 {
 }
 static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
-                                            target_ulong addr, uint16_t idxmap)
+                                            vaddr addr, uint16_t idxmap)
 {
 }

@@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 {
 }
 static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
-                                                     target_ulong addr,
+                                                     vaddr addr,
                                                      uint16_t idxmap)
 {
 }
 static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                            target_ulong addr,
+                                                            vaddr addr,
                                                             uint16_t idxmap)
 {
 }
@@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
 {
 }
 static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
-                                                 target_ulong addr,
+                                                 vaddr addr,
                                                  uint16_t idxmap,
                                                  unsigned bits)
 {
 }
 static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu,
-                                                          target_ulong addr,
+                                                          vaddr addr,
                                                           uint16_t idxmap,
                                                           unsigned bits)
 {
 }
 static inline void
-tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                               uint16_t idxmap, unsigned bits)
 {
 }
-static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
-                                             target_ulong len, uint16_t idxmap,
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+                                             vaddr len, uint16_t idxmap,
                                              unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
-                                                      target_ulong addr,
-                                                      target_ulong len,
+                                                      vaddr addr,
+                                                      vaddr len,
                                                       uint16_t idxmap,
                                                       unsigned bits)
 {
 }
 static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
-                                                             target_ulong addr,
-                                                             target_long len,
+                                                             vaddr addr,
+                                                             vaddr len,
                                                              uint16_t idxmap,
                                                              unsigned bits)
 {
@@ -XXX,XX +XXX,XX @@ static inline void mmap_lock(void) {}
 static inline void mmap_unlock(void) {}

 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+void tlb_set_dirty(CPUState *cpu, vaddr addr);

 MemoryRegionSection *
 address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/plugin-memory.h
+++ b/include/qemu/plugin-memory.h
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_hwaddr {
 * It would only fail if not called from an instrumented memory access
 * which would be an abuse of the API.
 */
-bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                        bool is_store, struct qemu_plugin_hwaddr *data);

 #endif /* PLUGIN_MEMORY_H */
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -XXX,XX +XXX,XX @@ void tb_flush(CPUState *cpu)
 {
 }

-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
+void tlb_set_dirty(CPUState *cpu, vaddr vaddr)
 {
 }

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 }

 static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
-                                      target_ulong page, target_ulong mask)
+                                      vaddr page, vaddr mask)
 {
     page &= mask;
     mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
@@ -XXX,XX +XXX,XX @@ static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
            page == (tlb_entry->addr_code & mask));
 }

-static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
-                                        target_ulong page)
+static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, vaddr page)
 {
     return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
 }
@@ -XXX,XX +XXX,XX @@ static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)

 /* Called with tlb_c.lock held */
 static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
-                                        target_ulong page,
-                                        target_ulong mask)
+                                        vaddr page,
+                                        vaddr mask)
 {
     if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
@@ -XXX,XX +XXX,XX @@ static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
     return false;
 }

-static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
-                                          target_ulong page)
+static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, vaddr page)
 {
     return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
 }

 /* Called with tlb_c.lock held */
 static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
-                                            target_ulong page,
-                                            target_ulong mask)
+                                            vaddr page,
+                                            vaddr mask)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
     int k;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
 }

 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
-                                              target_ulong page)
+                                              vaddr page)
 {
     tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
 }

-static void tlb_flush_page_locked(CPUArchState *env, int midx,
-                                  target_ulong page)
+static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
 {
-    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
-    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
+    vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
+    vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;

     /* Check if we need to flush due to large pages. */
     if ((page & lp_mask) == lp_addr) {
-        tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+        tlb_debug("forcing full flush midx %d (%"
+                  VADDR_PRIx "/%" VADDR_PRIx ")\n",
                   midx, lp_addr, lp_mask);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
     } else {
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
 static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
-                                             target_ulong addr,
+                                             vaddr addr,
                                              uint16_t idxmap)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,

     assert_cpu_is_self(cpu);

-    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
+    tlb_debug("page addr: %" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);

     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
 static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
 {
-    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
-    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+    vaddr addr_and_idxmap = data.target_ptr;
+    vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK;
     uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

     tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
 }

 typedef struct {
-    target_ulong addr;
+    vaddr addr;
     uint16_t idxmap;
 } TLBFlushPageByMMUIdxData;

@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
     g_free(d);
 }

-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
 {
-    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
+    tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);

     /* This should already be page aligned */
     addr &= TARGET_PAGE_MASK;
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
     }
 }

-void tlb_flush_page(CPUState *cpu, target_ulong addr)
+void tlb_flush_page(CPUState *cpu, vaddr addr)
 {
     tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
 }

-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
+void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
                                        uint16_t idxmap)
 {
-    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+    tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

     /* This should already be page aligned */
     addr &= TARGET_PAGE_MASK;
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
     tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
 }

-void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
+void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
 {
     tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
 }

 void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                              target_ulong addr,
+                                              vaddr addr,
                                               uint16_t idxmap)
 {
-    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
+    tlb_debug("addr: %" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);

     /* This should already be page aligned */
     addr &= TARGET_PAGE_MASK;
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
     }
 }

-void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
 {
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }

 static void tlb_flush_range_locked(CPUArchState *env, int midx,
-                                   target_ulong addr, target_ulong len,
+                                   vaddr addr, vaddr len,
                                    unsigned bits)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[midx];
     CPUTLBDescFast *f = &env_tlb(env)->f[midx];
-    target_ulong mask = MAKE_64BIT_MASK(0, bits);
+    vaddr mask = MAKE_64BIT_MASK(0, bits);

     /*
      * If @bits is smaller than the tlb size, there may be multiple entries
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
      */
     if (mask < f->mask || len > f->mask) {
         tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+                  "%" VADDR_PRIx "/%" VADDR_PRIx "+%" VADDR_PRIx ")\n",
                   midx, addr, mask, len);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
         return;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
      */
     if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
         tlb_debug("forcing full flush midx %d ("
-                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+                  "%" VADDR_PRIx "/%" VADDR_PRIx ")\n",
                   midx, d->large_page_addr, d->large_page_mask);
         tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
         return;
     }

-    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
-        target_ulong page = addr + i;
+    for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
+        vaddr page = addr + i;
         CPUTLBEntry *entry = tlb_entry(env, midx, page);

         if (tlb_flush_entry_mask_locked(entry, page, mask)) {
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_locked(CPUArchState *env, int midx,
 }

 typedef struct {
-    target_ulong addr;
-    target_ulong len;
+    vaddr addr;
+    vaddr len;
     uint16_t idxmap;
     uint16_t bits;
 } TLBFlushRangeData;
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,

     assert_cpu_is_self(cpu);

-    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+    tlb_debug("range: %" VADDR_PRIx "/%u+%" VADDR_PRIx " mmu_map:0x%x\n",
               d.addr, d.bits, d.len, d.idxmap);

     qemu_spin_lock(&env_tlb(env)->c.lock);
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
      * overlap the flushed pages, which includes the previous.
      */
     d.addr -= TARGET_PAGE_SIZE;
-    for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
+    for (vaddr i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
         tb_jmp_cache_clear_page(cpu, d.addr);
         d.addr += TARGET_PAGE_SIZE;
     }
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
     g_free(d);
 }

-void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
-                               target_ulong len, uint16_t idxmap,
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+                               vaddr len, uint16_t idxmap,
                                unsigned bits)
 {
     TLBFlushRangeData d;
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
     }
 }

-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                    uint16_t idxmap, unsigned bits)
 {
     tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
 }

 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
-                                        target_ulong addr, target_ulong len,
+                                        vaddr addr, vaddr len,
                                         uint16_t idxmap, unsigned bits)
 {
     TLBFlushRangeData d;
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
 }

 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
-                                            target_ulong addr,
-                                            uint16_t idxmap, unsigned bits)
+                                            vaddr addr, uint16_t idxmap,
+                                            unsigned bits)
 {
     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                        idxmap, bits);
 }

 void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                               target_ulong addr,
-                                               target_ulong len,
+                                               vaddr addr,
+                                               vaddr len,
                                                uint16_t idxmap,
                                                unsigned bits)
 {
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 }

 void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                                   target_ulong addr,
+                                                   vaddr addr,
                                                    uint16_t idxmap,
                                                    unsigned bits)
 {
@@ -XXX,XX +XXX,XX @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)

 /* Called with tlb_c.lock held */
 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
-                                         target_ulong vaddr)
+                                         vaddr addr)
 {
-    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
-        tlb_entry->addr_write = vaddr;
+    if (tlb_entry->addr_write == (addr | TLB_NOTDIRTY)) {
+        tlb_entry->addr_write = addr;
     }
 }

 /* update the TLB corresponding to virtual page vaddr
    so that it is no longer dirty */
-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
+void tlb_set_dirty(CPUState *cpu, vaddr addr)
 {
     CPUArchState *env = cpu->env_ptr;
     int mmu_idx;

     assert_cpu_is_self(cpu);

-    vaddr &= TARGET_PAGE_MASK;
+    addr &= TARGET_PAGE_MASK;
     qemu_spin_lock(&env_tlb(env)->c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
+        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
     }

     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         int k;
         for (k = 0; k < CPU_VTLB_SIZE; k++) {
-            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
+            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
         }
     }
     qemu_spin_unlock(&env_tlb(env)->c.lock);
@@ -XXX,XX +XXX,XX @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 /* Our TLB does not support large pages, so remember the area covered by
    large pages and trigger a full TLB flush if these are invalidated. */
 static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
-                               target_ulong vaddr, target_ulong size)
+                               vaddr addr, uint64_t size)
 {
-    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
-    target_ulong lp_mask = ~(size - 1);
+    vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
+    vaddr lp_mask = ~(size - 1);

-    if (lp_addr == (target_ulong)-1) {
+    if (lp_addr == (vaddr)-1) {
         /* No previous large page. */
-        lp_addr = vaddr;
+        lp_addr = addr;
     } else {
         /* Extend the existing region to include the new page.
            This is a compromise between unnecessary flushes and
            the cost of maintaining a full variable size TLB. */
         lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
-        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
+        while (((lp_addr ^ addr) & lp_mask) != 0) {
             lp_mask <<= 1;
         }
     }
@@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
 * critical section.
 */
 void tlb_set_page_full(CPUState *cpu, int mmu_idx,
-                       target_ulong vaddr, CPUTLBEntryFull *full)
+                       vaddr addr, CPUTLBEntryFull *full)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLB *tlb = env_tlb(env);
     CPUTLBDesc *desc = &tlb->d[mmu_idx];
     MemoryRegionSection *section;
     unsigned int index;
-    target_ulong address;
-    target_ulong write_address;
+    vaddr address;
+    vaddr write_address;
     uintptr_t addend;
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
-    target_ulong vaddr_page;
+    vaddr addr_page;
     int asidx, wp_flags, prot;
     bool is_ram, is_romd;

@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
         sz = TARGET_PAGE_SIZE;
     } else {
         sz = (hwaddr)1 << full->lg_page_size;
-        tlb_add_large_page(env, mmu_idx, vaddr, sz);
+        tlb_add_large_page(env, mmu_idx, addr, sz);
     }
-    vaddr_page = vaddr & TARGET_PAGE_MASK;
+    addr_page = addr & TARGET_PAGE_MASK;
     paddr_page = full->phys_addr & TARGET_PAGE_MASK;

     prot = full->prot;
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
                                 &xlat, &sz, full->attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);

-    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" HWADDR_FMT_plx
+    tlb_debug("vaddr=%" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
               " prot=%x idx=%d\n",
-              vaddr, full->phys_addr, prot, mmu_idx);
+              addr, full->phys_addr, prot, mmu_idx);

-    address = vaddr_page;
+    address = addr_page;
     if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access. */
         address |= TLB_INVALID_MASK;
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
         }
     }

-    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
+    wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
                                               TARGET_PAGE_SIZE);

-    index = tlb_index(env, mmu_idx, vaddr_page);
-    te = tlb_entry(env, mmu_idx, vaddr_page);
+    index = tlb_index(env, mmu_idx, addr_page);
+    te = tlb_entry(env, mmu_idx, addr_page);

     /*
      * Hold the TLB lock for the rest of the function. We could acquire/release
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     tlb->c.dirty |= 1 << mmu_idx;

     /* Make sure there's no cached translation for the new page. */
-    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
+    tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);

     /*
      * Only evict the old entry to the victim tlb if it's for a
      * different page; otherwise just overwrite the stale data.
      */
-    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
+    if (!tlb_hit_page_anyprot(te, addr_page) && !tlb_entry_is_empty(te)) {
         unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
         CPUTLBEntry *tv = &desc->vtable[vidx];

@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
     desc->fulltlb[index] = *full;
-    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
+    desc->fulltlb[index].xlat_section = iotlb - addr_page;
     desc->fulltlb[index].phys_addr = paddr_page;

     /* Now calculate the new entry */
-    tn.addend = addend - vaddr_page;
+    tn.addend = addend - addr_page;
     if (prot & PAGE_READ) {
         tn.addr_read = address;
         if (wp_flags & BP_MEM_READ) {
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     qemu_spin_unlock(&tlb->c.lock);
 }

-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                              hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, target_ulong size)
+                             int mmu_idx, uint64_t size)
 {
     CPUTLBEntryFull full = {
         .phys_addr = paddr,
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     };

     assert(is_power_of_2(size));
-    tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
+    tlb_set_page_full(cpu, mmu_idx, addr, &full);
 }

-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, vaddr addr,
                   hwaddr paddr, int prot,
-                  int mmu_idx, target_ulong size)
+                  int mmu_idx, uint64_t size)
 {
-    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
+    tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                             prot, mmu_idx, size);
 }

@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
-static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
+static void tlb_fill(CPUState *cpu, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     bool ok;
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
 }

 static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
-                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
+                         int mmu_idx, vaddr addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
 {
     CPUState *cpu = env_cpu(env);
@@ -XXX,XX +XXX,XX @@ static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
 }

 static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
-                      int mmu_idx, uint64_t val, target_ulong addr,
+                      int mmu_idx, uint64_t val, vaddr addr,
                       uintptr_t retaddr, MemOp op)
 {
     CPUState *cpu = env_cpu(env);
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
 /* Return true if ADDR is present in the victim tlb, and has been copied
    back to the main tlb. */
 static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
-                           MMUAccessType access_type, target_ulong page)
+                           MMUAccessType access_type, vaddr page)
 {
     size_t vidx;

@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
 * from the same thread (which a mem callback will be) this is safe.
 */

-bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
+bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                        bool is_store, struct qemu_plugin_hwaddr *data)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
-    target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+    vaddr tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

     if (likely(tlb_hit(tlb_addr, addr))) {
         /* We must have an iotlb entry for MMIO */
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -XXX,XX +XXX,XX @@ static void tb_remove_all(void)
 /* Call with mmap_lock held. */
 static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
 {
-    target_ulong addr;
+    vaddr addr;
     int flags;

     assert_memory_lock();
--
2.34.1
From: Anton Johansson <anjo@rev.ng>

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-3-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h | 6 +++---
 accel/tcg/translate-all.c | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

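The effect is easiest to see in the tb_gen_code() prototype: pc becomes vaddr and cs_base a plain uint64_t, so translation-block generation no longer depends on target_ulong. A sketch only, with the surrounding types left opaque (the real declarations are in accel/tcg/internal.h as patched below):

    #include <stdint.h>

    typedef uint64_t vaddr;                    /* assumed 64-bit */
    typedef struct CPUState CPUState;
    typedef struct TranslationBlock TranslationBlock;

    /* Before: target_ulong pc, target_ulong cs_base. After: */
    TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                                  uint64_t cs_base, uint32_t flags,
                                  int cflags);
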
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
 G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 #endif /* CONFIG_SOFTMMU */

-TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
-                              target_ulong cs_base, uint32_t flags,
+TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
+                              uint64_t cs_base, uint32_t flags,
                               int cflags);
 void page_init(void);
 void tb_htable_init(void);
@@ -XXX,XX +XXX,XX @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc);

 /* Return the current PC from CPU, which may be cached in TB. */
-static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
+static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
 {
     if (tb_cflags(tb) & CF_PCREL) {
         return cpu->cc->get_pc(cpu);
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void page_init(void)
 * Return the size of the generated code, or negative on error.
 */
 static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
-                           target_ulong pc, void *host_pc,
+                           vaddr pc, void *host_pc,
                            int *max_insns, int64_t *ti)
 {
     int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,

 /* Called with mmap_lock held for user mode emulation. */
 TranslationBlock *tb_gen_code(CPUState *cpu,
-                              target_ulong pc, target_ulong cs_base,
+                              vaddr pc, uint64_t cs_base,
                               uint32_t flags, int cflags)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
-        target_ulong pc = log_pc(cpu, tb);
+        vaddr pc = log_pc(cpu, tb);
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to "
-                     TARGET_FMT_lx "\n", pc);
+            qemu_log("cpu_io_recompile: rewound execution of TB to %"
+                     VADDR_PRIx "\n", pc);
         }
     }

--
2.34.1
From: Anton Johansson <anjo@rev.ng>

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-4-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/alpha/cpu.h | 4 ++--
 target/arm/cpu.h | 4 ++--
 target/avr/cpu.h | 4 ++--
 target/cris/cpu.h | 4 ++--
 target/hexagon/cpu.h | 4 ++--
 target/hppa/cpu.h | 5 ++---
 target/i386/cpu.h | 4 ++--
 target/loongarch/cpu.h | 6 ++----
 target/m68k/cpu.h | 4 ++--
 target/microblaze/cpu.h | 4 ++--
 target/mips/cpu.h | 4 ++--
 target/nios2/cpu.h | 4 ++--
 target/openrisc/cpu.h | 5 ++---
 target/ppc/cpu.h | 8 ++++----
 target/riscv/cpu.h | 4 ++--
 target/rx/cpu.h | 4 ++--
 target/s390x/cpu.h | 4 ++--
 target/sh4/cpu.h | 4 ++--
 target/sparc/cpu.h | 4 ++--
 target/tricore/cpu.h | 4 ++--
 target/xtensa/cpu.h | 4 ++--
 accel/tcg/cpu-exec.c | 9 ++++++---
 accel/tcg/translate-all.c | 3 ++-
 target/arm/helper.c | 4 ++--
 target/ppc/helper_regs.c | 4 ++--
 target/riscv/cpu_helper.c | 4 ++--
 26 files changed, 58 insertions(+), 58 deletions(-)

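Each target gets the same mechanical edit to cpu_get_tb_cpu_state(): the pc out-parameter widens to vaddr and cs_base to uint64_t. cs_base stays a plain integer rather than vaddr because not every target stores an address there (x86 puts the CS segment base in it, sparc its npc). A sketch of the resulting pattern, with a hypothetical CPUFooState standing in for any of the targets below:

    #include <stdint.h>

    typedef uint64_t vaddr;                    /* assumed 64-bit */
    typedef struct { vaddr pc; } CPUFooState;  /* hypothetical target */

    static inline void cpu_get_tb_cpu_state(CPUFooState *env, vaddr *pc,
                                            uint64_t *cs_base, uint32_t *flags)
    {
        *pc = env->pc;
        *cs_base = 0;
        *flags = 0;
    }
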
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -XXX,XX +XXX,XX @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                      MemTxResult response, uintptr_t retaddr);
 #endif

-static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline bool arm_cpu_bswap_data(CPUARMState *env)
 }
 #endif

-void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *flags);

 enum {
     QEMU_PSCI_CONDUIT_DISABLED = 0,
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
     TB_FLAGS_SKIP = 2,
 };

-static inline void cpu_get_tb_cpu_state(CPUAVRState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUAVRState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
 {
     uint32_t flags = 0;

diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)

 #include "exec/cpu-all.h"

-static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {

 FIELD(TB_FLAGS, IS_TIGHT_LOOP, 0, 1)

-static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     uint32_t hex_flags = 0;
     *pc = env->gpr[HEX_REG_PC];
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
 #define TB_FLAG_PRIV_SHIFT 8
 #define TB_FLAG_UNALIGN 0x400

-static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
-                                        target_ulong *cs_base,
-                                        uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
 {
     uint32_t flags = env->psw_n * PSW_N;

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index_kernel(CPUX86State *env)
 #include "hw/i386/apic.h"
 #endif

-static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *cs_base = env->segs[R_CS].base;
     *pc = *cs_base + env->eip;
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch)
 #define HW_FLAGS_EUEN_FPE 0x04
 #define HW_FLAGS_EUEN_SXE 0x08

-static inline void cpu_get_tb_cpu_state(CPULoongArchState *env,
-                                        target_ulong *pc,
-                                        target_ulong *cs_base,
-                                        uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -XXX,XX +XXX,XX @@ void m68k_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
 #define TB_FLAGS_TRACE 16
 #define TB_FLAGS_TRACE_BIT (1 << TB_FLAGS_TRACE)

-static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUM68KState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -XXX,XX +XXX,XX @@ void mb_tcg_init(void);
 /* Ensure there is no overlap between the two masks. */
 QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK);

-static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -XXX,XX +XXX,XX @@ void itc_reconfigure(struct MIPSITUState *tag);
 /* helper.c */
 target_ulong exception_resume_pc(CPUMIPSState *env);

-static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->active_tc.PC;
     *cs_base = 0;
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TBFLAGS, CRS0, 0, 1) /* Set if CRS == 0. */
 FIELD(TBFLAGS, U, 1, 1) /* Overlaps CR_STATUS_U */
 FIELD(TBFLAGS, R0_0, 2, 1) /* Set if R0 == 0. */

-static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUNios2State *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     unsigned crs = FIELD_EX32(env->ctrl[CR_STATUS], CR_STATUS, CRS);

diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_set_gpr(CPUOpenRISCState *env, int i, uint32_t val)
     env->shadow_gpr[0][i] = val;
 }

-static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
-                                        target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -XXX,XX +XXX,XX @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer);
 #define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))

 #ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *flags);
 #else
-static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->nip;
     *cs_base = 0;
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
     return cpu->cfg.vlen >> (sew + 3 - lmul);
 }

-void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
-                          target_ulong *cs_base, uint32_t *pflags);
+void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
+                          uint64_t *cs_base, uint32_t *pflags);

 void riscv_cpu_update_mask(CPURISCVState *env);

diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -XXX,XX +XXX,XX @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
 #define RX_CPU_IRQ 0
 #define RX_CPU_FIR 1

-static inline void cpu_get_tb_cpu_state(CPURXState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
 #endif
 }

-static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     if (env->psw.addr & 1) {
         /*
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
     env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
 }

-static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUSH4State *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     /* For a gUSA region, notice the end of the region. */
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ trap_state* cpu_tsptr(CPUSPARCState* env);
 #define TB_FLAG_HYPER (1 << 7)
 #define TB_FLAG_ASI_SHIFT 24

-static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *pflags)
+static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *pflags)
 {
     uint32_t flags;
     *pc = env->pc;
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
 void cpu_state_reset(CPUTriCoreState *s);
 void tricore_tcg_init(void);

-static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     uint32_t new_flags = 0;
     *pc = env->PC;
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)

 #include "exec/cpu-all.h"

-static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, vaddr *pc,
+                                        uint64_t *cs_base, uint32_t *flags)
 {
     *pc = env->pc;
     *cs_base = 0;
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 {
     CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
-    target_ulong cs_base, pc;
+    vaddr pc;
+    uint64_t cs_base;
     uint32_t flags, cflags;

     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
374
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
375
{
376
CPUArchState *env = cpu->env_ptr;
377
TranslationBlock *tb;
378
- target_ulong cs_base, pc;
379
+ vaddr pc;
380
+ uint64_t cs_base;
381
uint32_t flags, cflags;
382
int tb_exit;
383
384
@@ -XXX,XX +XXX,XX @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
385
386
while (!cpu_handle_interrupt(cpu, &last_tb)) {
387
TranslationBlock *tb;
388
- target_ulong cs_base, pc;
389
+ vaddr pc;
390
+ uint64_t cs_base;
391
uint32_t flags, cflags;
392
393
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
394
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
395
index XXXXXXX..XXXXXXX 100644
396
--- a/accel/tcg/translate-all.c
397
+++ b/accel/tcg/translate-all.c
398
@@ -XXX,XX +XXX,XX @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
399
/* The exception probably happened in a helper. The CPU state should
400
have been saved before calling it. Fetch the PC from there. */
401
CPUArchState *env = cpu->env_ptr;
402
- target_ulong pc, cs_base;
403
+ vaddr pc;
404
+ uint64_t cs_base;
405
tb_page_addr_t addr;
406
uint32_t flags;
407
408
diff --git a/target/arm/helper.c b/target/arm/helper.c
409
index XXXXXXX..XXXXXXX 100644
410
--- a/target/arm/helper.c
411
+++ b/target/arm/helper.c
412
@@ -XXX,XX +XXX,XX @@ static bool mve_no_pred(CPUARMState *env)
413
return true;
414
}
415
416
-void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
417
- target_ulong *cs_base, uint32_t *pflags)
418
+void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
419
+ uint64_t *cs_base, uint32_t *pflags)
420
{
421
CPUARMTBFlags flags;
422
423
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
424
index XXXXXXX..XXXXXXX 100644
425
--- a/target/ppc/helper_regs.c
426
+++ b/target/ppc/helper_regs.c
427
@@ -XXX,XX +XXX,XX @@ void hreg_update_pmu_hflags(CPUPPCState *env)
428
}
429
430
#ifdef CONFIG_DEBUG_TCG
431
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
432
- target_ulong *cs_base, uint32_t *flags)
433
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
434
+ uint64_t *cs_base, uint32_t *flags)
435
{
436
uint32_t hflags_current = env->hflags;
437
uint32_t hflags_rebuilt;
438
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
439
index XXXXXXX..XXXXXXX 100644
440
--- a/target/riscv/cpu_helper.c
441
+++ b/target/riscv/cpu_helper.c
442
@@ -XXX,XX +XXX,XX @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
443
#endif
444
}
445
446
-void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
447
- target_ulong *cs_base, uint32_t *pflags)
448
+void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
449
+ uint64_t *cs_base, uint32_t *pflags)
450
{
451
CPUState *cs = env_cpu(env);
452
RISCVCPU *cpu = RISCV_CPU(cs);
453
--
454
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-5-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu_ldst.h | 10 +++++-----
 accel/tcg/cputlb.c      |  8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)

 #include "tcg/oversized-guest.h"

-static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
-                                        MMUAccessType access_type)
+static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
+                                    MMUAccessType access_type)
 {
     /* Do not rearrange the CPUTLBEntry structure members. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
@@ -XXX,XX +XXX,XX @@ static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
 #endif
 }

-static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
+static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
 {
     return tlb_read_idx(entry, MMU_DATA_STORE);
 }

 /* Find the TLB index corresponding to the mmu_idx + address pair. */
 static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
-                                  target_ulong addr)
+                                  vaddr addr)
 {
     uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;

@@ -XXX,XX +XXX,XX @@ static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,

 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
 static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
-                                     target_ulong addr)
+                                     vaddr addr)
 {
     return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)];
 }
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     assert_cpu_is_self(env_cpu(env));
     for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
         CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
-        target_ulong cmp = tlb_read_idx(vtlb, access_type);
+        uint64_t cmp = tlb_read_idx(vtlb, access_type);

         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb. */
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
 {
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = tlb_read_idx(entry, access_type);
+    uint64_t tlb_addr = tlb_read_idx(entry, access_type);
     target_ulong page_addr = addr & TARGET_PAGE_MASK;
     int flags = TLB_FLAGS_MASK;

@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
     CPUArchState *env = cpu->env_ptr;
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
-    vaddr tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+    uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;

     if (likely(tlb_hit(tlb_addr, addr))) {
         /* We must have an iotlb entry for MMIO */
@@ -XXX,XX +XXX,XX @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
     target_ulong addr = data->addr;
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    target_ulong tlb_addr = tlb_read_idx(entry, access_type);
+    uint64_t tlb_addr = tlb_read_idx(entry, access_type);
     bool maybe_resized = false;

     /* If the TLB entry is for a different page, reload and try again. */
--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Functions accessing MMULookupPageData are also updated.

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-6-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
 typedef struct MMULookupPageData {
     CPUTLBEntryFull *full;
     void *haddr;
-    target_ulong addr;
+    vaddr addr;
     int flags;
     int size;
 } MMULookupPageData;
@@ -XXX,XX +XXX,XX @@ typedef struct MMULookupLocals {
 static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
 {
-    target_ulong addr = data->addr;
+    vaddr addr = data->addr;
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
@@ -XXX,XX +XXX,XX @@ static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
                                MMUAccessType access_type, uintptr_t ra)
 {
     CPUTLBEntryFull *full = data->full;
-    target_ulong addr = data->addr;
+    vaddr addr = data->addr;
     int flags = data->flags;
     int size = data->size;

@@ -XXX,XX +XXX,XX @@ static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
  * Resolve the translation for the page(s) beginning at @addr, for MemOp.size
  * bytes. Return true if the lookup crosses a page boundary.
  */
-static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
 {
     unsigned a_bits;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, MMULookupPageData *p,
                                MMUAccessType type, uintptr_t ra)
 {
     CPUTLBEntryFull *full = p->full;
-    target_ulong addr = p->addr;
+    vaddr addr = p->addr;
     int i, size = p->size;

     QEMU_IOTHREAD_LOCK_GUARD();
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
     return ret;
 }

-static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
     return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
 }

-static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                            uintptr_t ra, MMUAccessType access_type)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
     return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
 }

-static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                            uintptr_t ra, MMUAccessType access_type)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
     return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
 }

-static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
+static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
                            uintptr_t ra, MMUAccessType access_type)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
     return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
 }

-static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr,
+static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
                           MemOpIdx oi, uintptr_t ra)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
                                uint64_t val_le, int mmu_idx, uintptr_t ra)
 {
     CPUTLBEntryFull *full = p->full;
-    target_ulong addr = p->addr;
+    vaddr addr = p->addr;
     int i, size = p->size;

     QEMU_IOTHREAD_LOCK_GUARD();
@@ -XXX,XX +XXX,XX @@ void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
     do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
 }

-static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
                        MemOpIdx oi, uintptr_t ra)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
     do_st2_mmu(env, addr, val, oi, retaddr);
 }

-static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
                        MemOpIdx oi, uintptr_t ra)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
     do_st4_mmu(env, addr, val, oi, retaddr);
 }

-static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
                        MemOpIdx oi, uintptr_t ra)
 {
     MMULookupLocals l;
@@ -XXX,XX +XXX,XX @@ void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
     do_st8_mmu(env, addr, val, oi, retaddr);
 }

-static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
                         MemOpIdx oi, uintptr_t ra)
 {
     MMULookupLocals l;
--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-7-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu)
 }

 struct tb_desc {
-    target_ulong pc;
-    target_ulong cs_base;
+    vaddr pc;
+    uint64_t cs_base;
     CPUArchState *env;
     tb_page_addr_t page_addr0;
     uint32_t flags;
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
         return true;
     } else {
         tb_page_addr_t phys_page1;
-        target_ulong virt_page1;
+        vaddr virt_page1;

         /*
          * We know that the first page matched, and an otherwise valid TB
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
     return false;
 }

-static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                          target_ulong cs_base, uint32_t flags,
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
+                                          uint64_t cs_base, uint32_t flags,
                                           uint32_t cflags)
 {
     tb_page_addr_t phys_pc;
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
 }

 /* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
-                                          target_ulong cs_base,
-                                          uint32_t flags, uint32_t cflags)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
+                                          uint64_t cs_base, uint32_t flags,
+                                          uint32_t cflags)
 {
     TranslationBlock *tb;
     CPUJumpCache *jc;
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     return tb;
 }

-static void log_cpu_exec(target_ulong pc, CPUState *cpu,
+static void log_cpu_exec(vaddr pc, CPUState *cpu,
                          const TranslationBlock *tb)
 {
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
                       "Trace %d: %p [%08" PRIx64
-                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
+                      "/%" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));

@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
     }
 }

-static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
+static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                        uint32_t *cflags)
 {
     CPUBreakpoint *bp;
@@ -XXX,XX +XXX,XX @@ static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
     return false;
 }

-static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                          uint32_t *cflags)
 {
     return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
             cc->set_pc(cpu, last_tb->pc);
         }
         if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
-            target_ulong pc = log_pc(cpu, last_tb);
+            vaddr pc = log_pc(cpu, last_tb);
             if (qemu_log_in_addr_range(pc)) {
-                qemu_log("Stopped execution of TB chain before %p ["
-                         TARGET_FMT_lx "] %s\n",
+                qemu_log("Stopped execution of TB chain before %p [%"
+                         VADDR_PRIx "] %s\n",
                          last_tb->tc.ptr, pc, lookup_symbol(pc));
             }
         }
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 }

 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
-                                    target_ulong pc,
-                                    TranslationBlock **last_tb, int *tb_exit)
+                                    vaddr pc, TranslationBlock **last_tb,
+                                    int *tb_exit)
 {
     int32_t insns_left;

--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Related functions dealing with the jump cache are also updated.
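
For reference, the user-mode variant of the hash touched here is easy
to check in isolation. A standalone sketch (TB_JMP_CACHE_BITS = 12 is
an illustrative value of mine, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vaddr;          /* the type this patch widens to */

    #define TB_JMP_CACHE_BITS 12     /* illustrative value */
    #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

    /* Mirrors the shape of the user-mode tb_jmp_cache_hash_func():
     * fold the high bits of pc into the low bits, then mask. */
    static unsigned int jmp_cache_hash(vaddr pc)
    {
        return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
    }

    int main(void)
    {
        /* With a 64-bit vaddr, a pc above 4 GiB still contributes
         * its high bits to the index instead of being truncated. */
        printf("%u\n", jmp_cache_hash(0x7f12345678ull));
        return 0;
    }
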
Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-8-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-hash.h      | 12 ++++++------
 accel/tcg/tb-jmp-cache.h |  2 +-
 accel/tcg/cputlb.c       |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-hash.h
+++ b/accel/tcg/tb-hash.h
@@ -XXX,XX +XXX,XX @@
 #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
 #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
 {
-    target_ulong tmp;
+    vaddr tmp;
     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
     return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
 }

-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
 {
-    target_ulong tmp;
+    vaddr tmp;
     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
     return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
@@ -XXX,XX +XXX,XX @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 #else

 /* In user-mode we can get better hashing because we do not have a TLB */
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
 {
     return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
 }
@@ -XXX,XX +XXX,XX @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 #endif /* CONFIG_SOFTMMU */

 static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc,
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
                       uint32_t flags, uint64_t flags2, uint32_t cf_mask)
 {
     return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -XXX,XX +XXX,XX @@ struct CPUJumpCache {
     struct rcu_head rcu;
     struct {
         TranslationBlock *tb;
-        target_ulong pc;
+        vaddr pc;
     } array[TB_JMP_CACHE_SIZE];
 };

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
     desc->window_max_entries = max_entries;
 }

-static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
 {
     CPUJumpCache *jc = cpu->tb_jmp_cache;
     int i, i0;
--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Functions for probing memory accesses (and functions that call these)
are updated to take a vaddr for guest virtual addresses instead of a
target_ulong.
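
The point of the widening is that vaddr has one fixed width for every
guest, whereas target_ulong is only 32 bits on 32-bit targets. A
standalone sketch of the difference (both typedefs below are
illustrative stand-ins, not the real QEMU definitions):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t target_ulong;   /* as on a 32-bit guest build */
    typedef uint64_t vaddr;          /* always 64 bits */

    int main(void)
    {
        vaddr addr = 0x123456789abcull;
        target_ulong narrow = addr;  /* high bits silently dropped */

        printf("vaddr:        0x%" PRIx64 "\n", (uint64_t)addr);
        printf("target_ulong: 0x%" PRIx32 "\n", narrow);
        return 0;
    }

A prototype declared with vaddr therefore compiles identically no
matter which guest is configured, which is what lets these helpers
move toward target-independent code.
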
Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-9-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 14 +++++++-------
 accel/stubs/tcg-stub.c  |  4 ++--
 accel/tcg/cputlb.c      | 12 ++++++------
 accel/tcg/user-exec.c   |  8 ++++----
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
  * Finally, return the host address for a page that is backed by RAM,
  * or NULL if the page requires I/O.
  */
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

-static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                 int mmu_idx, uintptr_t retaddr)
 {
     return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
 }

-static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
 {
     return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
@@ -XXX,XX +XXX,XX @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
  * Do handle clean pages, so exclude TLB_NOTDIRY from the returned flags.
  * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
  */
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t retaddr);

@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
  * and must be consumed or copied immediately, before any further
  * access or changes to TLB @mmu_idx.
  */
-int probe_access_full(CPUArchState *env, target_ulong addr, int size,
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost,
                       CPUTLBEntryFull **pfull, uintptr_t retaddr);
@@ -XXX,XX +XXX,XX @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
  *
  * Note: this function can trigger an exception.
  */
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                         void **hostp);

 /**
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
  * Note: this function can trigger an exception.
  */
 static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
-                                                target_ulong addr)
+                                                vaddr addr)
 {
     return get_page_addr_code_hostp(env, addr, NULL);
 }
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/stubs/tcg-stub.c
+++ b/accel/stubs/tcg-stub.c
@@ -XXX,XX +XXX,XX @@ void tcg_flush_jmp_cache(CPUState *cpu)
 {
 }

-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t retaddr)
 {
     g_assert_not_reached();
 }

-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     /* Handled by hardware accelerator. */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     }
 }

-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
                                  int fault_size, MMUAccessType access_type,
                                  int mmu_idx, bool nonfault,
                                  void **phost, CPUTLBEntryFull **pfull,
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     uintptr_t index = tlb_index(env, mmu_idx, addr);
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     uint64_t tlb_addr = tlb_read_idx(entry, access_type);
-    target_ulong page_addr = addr & TARGET_PAGE_MASK;
+    vaddr page_addr = addr & TARGET_PAGE_MASK;
     int flags = TLB_FLAGS_MASK;

     if (!tlb_hit_page(tlb_addr, page_addr)) {
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     return flags;
 }

-int probe_access_full(CPUArchState *env, target_ulong addr, int size,
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, CPUTLBEntryFull **pfull,
                       uintptr_t retaddr)
@@ -XXX,XX +XXX,XX @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
     return flags;
 }

-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t retaddr)
 {
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
     return flags;
 }

-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 {
     CPUTLBEntryFull *full;
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
  * NOTE: This function will trigger an exception if the page is
  * not executable.
  */
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                         void **hostp)
 {
     CPUTLBEntryFull *full;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc)
     return current_tb_invalidated ? 2 : 1;
 }

-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
                                  int fault_size, MMUAccessType access_type,
                                  bool nonfault, uintptr_t ra)
 {
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
 }

-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t ra)
 {
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
     return flags;
 }

-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t ra)
 {
     int flags;
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
     return size ? g2h(env_cpu(env), addr) : NULL;
 }

-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                         void **hostp)
 {
     int flags;
--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Update atomic_mmu_lookup() and cpu_mmu_lookup() to take the guest
virtual address as a vaddr instead of a target_ulong.
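
Both lookups retain their unaligned-access rejection, which reduces to
a power-of-two mask over the low bits of the address. A standalone
sketch of the idiom (my illustration, not code from this patch):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t vaddr;

    /* An access aligned to 2^a_bits has none of its low a_bits set. */
    static bool access_is_aligned(vaddr addr, int a_bits)
    {
        return (addr & (((vaddr)1 << a_bits) - 1)) == 0;
    }
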
Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-10-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c    | 6 +++---
 accel/tcg/user-exec.c | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
  * Probe for an atomic operation. Do not allow unaligned operations,
  * or io operations to proceed. Return the host address.
  */
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               MemOpIdx oi, int size, uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+                               int size, uintptr_t retaddr)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     uintptr_t index;
     CPUTLBEntry *tlbe;
-    target_ulong tlb_addr;
+    vaddr tlb_addr;
     void *hostaddr;
     CPUTLBEntryFull *full;

diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ void page_reset_target_data(target_ulong start, target_ulong last) { }

 /* The softmmu versions of these helpers are in cputlb.c. */

-static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
+static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
                             MemOp mop, uintptr_t ra, MMUAccessType type)
 {
     int a_bits = get_alignment_bits(mop);
@@ -XXX,XX +XXX,XX @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
 /*
  * Do not allow unaligned operations to proceed. Return the host address.
  */
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               MemOpIdx oi, int size, uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+                               int size, uintptr_t retaddr)
 {
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Use vaddr for guest virtual address in translator_use_goto_tb() and
translator_loop().
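
The gist of translator_use_goto_tb() is unchanged by the widening:
direct goto_tb chaining is only permitted while the destination stays
on the same guest page the TB was translated from. A standalone sketch
of that same-page test (TARGET_PAGE_BITS = 12 is an illustrative value
of mine):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t vaddr;

    #define TARGET_PAGE_BITS 12      /* illustrative value */
    #define TARGET_PAGE_MASK (~(((vaddr)1 << TARGET_PAGE_BITS) - 1))

    /* True when pc_first and dest share a guest page, so a direct
     * TB-to-TB jump cannot escape the translated region's page. */
    static bool same_page(vaddr pc_first, vaddr dest)
    {
        return ((pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
    }
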
Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-11-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/translator.h |  6 +++---
 accel/tcg/translator.c    | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
  *  - When too many instructions have been translated.
  */
 void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                     target_ulong pc, void *host_pc,
-                     const TranslatorOps *ops, DisasContextBase *db);
+                     vaddr pc, void *host_pc, const TranslatorOps *ops,
+                     DisasContextBase *db);

 /**
  * translator_use_goto_tb
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
  * Return true if goto_tb is allowed between the current TB
  * and the destination PC.
  */
-bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest);

 /**
  * translator_io_start
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -XXX,XX +XXX,XX @@ static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
     }
 }

-bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
 {
     /* Suppress goto_tb if requested. */
     if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 }

 void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                     target_ulong pc, void *host_pc,
-                     const TranslatorOps *ops, DisasContextBase *db)
+                     vaddr pc, void *host_pc, const TranslatorOps *ops,
+                     DisasContextBase *db)
 {
     uint32_t cflags = tb_cflags(tb);
     TCGOp *icount_start_insn;
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
 }

 static void *translator_access(CPUArchState *env, DisasContextBase *db,
-                               target_ulong pc, size_t len)
+                               vaddr pc, size_t len)
 {
     void *host;
-    target_ulong base, end;
+    vaddr base, end;
     TranslationBlock *tb;

     tb = db->tb;
--
2.34.1
Deleted patch
From: Anton Johansson <anjo@rev.ng>

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230621135633.1649-13-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h | 2 +-
 cpu.c                   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu);

 /* TranslationBlock invalidate API */
 #if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr);
+void tb_invalidate_phys_addr(hwaddr addr);
 #else
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 #endif
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void list_cpus(void)
 }

 #if defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(target_ulong addr)
+void tb_invalidate_phys_addr(hwaddr addr)
 {
     mmap_lock();
     tb_invalidate_phys_page(addr);
--
2.34.1
Deleted patch
From: Alex Bennée <alex.bennee@linaro.org>

Balaton discovered that asserts for the extract/deposit calls had a
significant impact on a lame benchmark on qemu-ppc. Replicating with:

  ./qemu-ppc64 ~/lsrc/tests/lame.git-svn/builds/ppc64/frontend/lame \
    -h pts-trondheim-3.wav pts-trondheim-3.mp3

showed that the pack/unpack routines were not eliding the assert
checks as they should have, causing them to figure prominently in the
profile:

  11.44%  qemu-ppc64  qemu-ppc64  [.] unpack_raw64.isra.0
  11.03%  qemu-ppc64  qemu-ppc64  [.] parts64_uncanon_normal
   8.26%  qemu-ppc64  qemu-ppc64  [.] helper_compute_fprf_float64
   6.75%  qemu-ppc64  qemu-ppc64  [.] do_float_check_status
   5.34%  qemu-ppc64  qemu-ppc64  [.] parts64_muladd
   4.75%  qemu-ppc64  qemu-ppc64  [.] pack_raw64.isra.0
   4.38%  qemu-ppc64  qemu-ppc64  [.] parts64_canonicalize
   3.62%  qemu-ppc64  qemu-ppc64  [.] float64r32_round_pack_canonical

After this patch the same test runs 31 seconds faster, with a profile
where the generated code dominates more:

  + 14.12%  0.00%  qemu-ppc64  [unknown]   [.] 0x0000004000619420
  + 13.30%  0.00%  qemu-ppc64  [unknown]   [.] 0x0000004000616850
  + 12.58% 12.19%  qemu-ppc64  qemu-ppc64  [.] parts64_uncanon_normal
  + 10.62%  0.00%  qemu-ppc64  [unknown]   [.] 0x000000400061bf70
  +  9.91%  9.73%  qemu-ppc64  qemu-ppc64  [.] helper_compute_fprf_float64
  +  7.84%  7.82%  qemu-ppc64  qemu-ppc64  [.] do_float_check_status
  +  6.47%  5.78%  qemu-ppc64  qemu-ppc64  [.] parts64_canonicalize.constprop.0
  +  6.46%  0.00%  qemu-ppc64  [unknown]   [.] 0x0000004000620130
  +  6.42%  0.00%  qemu-ppc64  [unknown]   [.] 0x0000004000619400
  +  6.17%  6.04%  qemu-ppc64  qemu-ppc64  [.] parts64_muladd
  +  5.85%  0.00%  qemu-ppc64  [unknown]   [.] 0x00000040006167e0
  +  5.74%  0.00%  qemu-ppc64  [unknown]   [.] 0x0000b693fcffffd3
  +  5.45%  4.78%  qemu-ppc64  qemu-ppc64  [.] float64r32_round_pack_canonical
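
For background: QEMU_FLATTEN expands to the GCC/Clang attribute
flatten, which inlines every call made inside the marked function.
Once the small field-extraction helpers are inlined with constant
arguments, their range asserts constant-fold away instead of surviving
as out-of-line ".isra" clones. A toy standalone illustration (the
helper below is mine, not softfloat code):

    #include <assert.h>
    #include <stdio.h>

    #define QEMU_FLATTEN __attribute__((flatten))

    static unsigned extract_bits(unsigned value, int start, int length)
    {
        assert(start >= 0 && length > 0 && start + length <= 32);
        return (value >> start) & (~0u >> (32 - length));
    }

    /* flatten forces extract_bits() to be inlined here, so the
     * compiler sees the constant start/length and can elide the
     * assert entirely. */
    static unsigned QEMU_FLATTEN unpack_exponent(unsigned raw)
    {
        return extract_bits(raw, 23, 8);
    }

    int main(void)
    {
        printf("%u\n", unpack_exponent(0x40490fdbu));
        return 0;
    }
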
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <ec9cfe5a-d5f2-466d-34dc-c35817e7e010@linaro.org>
[AJB: Patchified rth's suggestion]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Cc: BALATON Zoltan <balaton@eik.bme.hu>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: BALATON Zoltan <balaton@eik.bme.hu>
Message-Id: <20230523131107.3680641-1-alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 fpu/softfloat.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ static void unpack_raw64(FloatParts64 *r, const FloatFmt *fmt, uint64_t raw)
     };
 }

-static inline void float16_unpack_raw(FloatParts64 *p, float16 f)
+static void QEMU_FLATTEN float16_unpack_raw(FloatParts64 *p, float16 f)
 {
     unpack_raw64(p, &float16_params, f);
 }

-static inline void bfloat16_unpack_raw(FloatParts64 *p, bfloat16 f)
+static void QEMU_FLATTEN bfloat16_unpack_raw(FloatParts64 *p, bfloat16 f)
 {
     unpack_raw64(p, &bfloat16_params, f);
 }

-static inline void float32_unpack_raw(FloatParts64 *p, float32 f)
+static void QEMU_FLATTEN float32_unpack_raw(FloatParts64 *p, float32 f)
 {
     unpack_raw64(p, &float32_params, f);
 }

-static inline void float64_unpack_raw(FloatParts64 *p, float64 f)
+static void QEMU_FLATTEN float64_unpack_raw(FloatParts64 *p, float64 f)
 {
     unpack_raw64(p, &float64_params, f);
 }

-static void floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
+static void QEMU_FLATTEN floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
 {
     *p = (FloatParts128) {
         .cls = float_class_unclassified,
@@ -XXX,XX +XXX,XX @@ static void floatx80_unpack_raw(FloatParts128 *p, floatx80 f)
     };
 }

-static void float128_unpack_raw(FloatParts128 *p, float128 f)
+static void QEMU_FLATTEN float128_unpack_raw(FloatParts128 *p, float128 f)
 {
     const int f_size = float128_params.frac_size - 64;
     const int e_size = float128_params.exp_size;
@@ -XXX,XX +XXX,XX @@ static uint64_t pack_raw64(const FloatParts64 *p, const FloatFmt *fmt)
     return ret;
 }

-static inline float16 float16_pack_raw(const FloatParts64 *p)
+static float16 QEMU_FLATTEN float16_pack_raw(const FloatParts64 *p)
 {
     return make_float16(pack_raw64(p, &float16_params));
 }

-static inline bfloat16 bfloat16_pack_raw(const FloatParts64 *p)
+static bfloat16 QEMU_FLATTEN bfloat16_pack_raw(const FloatParts64 *p)
 {
     return pack_raw64(p, &bfloat16_params);
 }

-static inline float32 float32_pack_raw(const FloatParts64 *p)
+static float32 QEMU_FLATTEN float32_pack_raw(const FloatParts64 *p)
 {
     return make_float32(pack_raw64(p, &float32_params));
 }

-static inline float64 float64_pack_raw(const FloatParts64 *p)
+static float64 QEMU_FLATTEN float64_pack_raw(const FloatParts64 *p)
 {
     return make_float64(pack_raw64(p, &float64_params));
 }

-static float128 float128_pack_raw(const FloatParts128 *p)
+static float128 QEMU_FLATTEN float128_pack_raw(const FloatParts128 *p)
 {
     const int f_size = float128_params.frac_size - 64;
     const int e_size = float128_params.exp_size;
--
2.34.1
Deleted patch
This is a perfectly natural occurrence for x86 "rep movsb", where the
"rep" prefix forms a counted loop of a single instruction.

During the tests/tcg/multiarch/memory test, this logging is triggered
over 350000 times. In the context of a cross-i386-tci build, which is
already slow by nature, the logging is enough to push the test into
timeout.
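
For context, an x86-only illustration of mine (not part of the patch):
"rep movsb" is one instruction that the CPU iterates RCX times, so a
per-execution instruction callback legitimately observes the same PC
on consecutive firings.

    #include <stdio.h>

    /* Copy n bytes with a single instruction; under TCG, an
     * insn-exec plugin callback fires once per iteration, always
     * reporting the same PC. */
    static void copy_rep_movsb(void *dst, const void *src, unsigned long n)
    {
        __asm__ volatile("rep movsb"
                         : "+D"(dst), "+S"(src), "+c"(n)
                         :
                         : "memory");
    }

    int main(void)
    {
        char src[12] = "hello, rep!", dst[12];
        copy_rep_movsb(dst, src, sizeof(src));
        puts(dst);
        return 0;
    }
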
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/plugin/insn.c                      | 9 +--------
 tests/tcg/i386/Makefile.softmmu-target   | 9 ---------
 tests/tcg/i386/Makefile.target           | 6 ------
 tests/tcg/x86_64/Makefile.softmmu-target | 9 ---------
 4 files changed, 1 insertion(+), 32 deletions(-)

diff --git a/tests/plugin/insn.c b/tests/plugin/insn.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/plugin/insn.c
+++ b/tests/plugin/insn.c
@@ -XXX,XX +XXX,XX @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
 #define MAX_CPUS 8 /* lets not go nuts */

 typedef struct {
-    uint64_t last_pc;
     uint64_t insn_count;
 } InstructionCount;

@@ -XXX,XX +XXX,XX @@ static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
 {
     unsigned int i = cpu_index % MAX_CPUS;
     InstructionCount *c = &counts[i];
-    uint64_t this_pc = GPOINTER_TO_UINT(udata);
-    if (this_pc == c->last_pc) {
-        g_autofree gchar *out = g_strdup_printf("detected repeat execution @ 0x%"
-                                                PRIx64 "\n", this_pc);
-        qemu_plugin_outs(out);
-    }
-    c->last_pc = this_pc;
+
     c->insn_count++;
 }

diff --git a/tests/tcg/i386/Makefile.softmmu-target b/tests/tcg/i386/Makefile.softmmu-target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/i386/Makefile.softmmu-target
+++ b/tests/tcg/i386/Makefile.softmmu-target
@@ -XXX,XX +XXX,XX @@ EXTRA_RUNS+=$(MULTIARCH_RUNS)

 memory: CFLAGS+=-DCHECK_UNALIGNED=1

-# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
-run-plugin-%-with-libinsn.so:
-	$(call run-test, $@, \
-	  $(QEMU) -monitor none -display none \
-	    -chardev file$(COMMA)path=$@.out$(COMMA)id=output \
-	    -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
-	    -d plugin -D $*-with-libinsn.so.pout \
-	    $(QEMU_OPTS) $*)
-
 # Running
 QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel
diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/i386/Makefile.target
+++ b/tests/tcg/i386/Makefile.target
@@ -XXX,XX +XXX,XX @@ else
 SKIP_I386_TESTS+=test-i386-fprem
 endif

-# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
-run-plugin-%-with-libinsn.so:
-	$(call run-test, $@, $(QEMU) $(QEMU_OPTS) \
-	  -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
-	  -d plugin -D $*-with-libinsn.so.pout $*)
-
 # Update TESTS
 I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS))
 TESTS=$(MULTIARCH_TESTS) $(I386_TESTS)
diff --git a/tests/tcg/x86_64/Makefile.softmmu-target b/tests/tcg/x86_64/Makefile.softmmu-target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/x86_64/Makefile.softmmu-target
+++ b/tests/tcg/x86_64/Makefile.softmmu-target
@@ -XXX,XX +XXX,XX @@ EXTRA_RUNS+=$(MULTIARCH_RUNS)

 memory: CFLAGS+=-DCHECK_UNALIGNED=1

-# non-inline runs will trigger the duplicate instruction heuristics in libinsn.so
-run-plugin-%-with-libinsn.so:
-	$(call run-test, $@, \
-	  $(QEMU) -monitor none -display none \
-	    -chardev file$(COMMA)path=$@.out$(COMMA)id=output \
-	    -plugin ../../plugin/libinsn.so$(COMMA)inline=on \
-	    -d plugin -D $*-with-libinsn.so.pout \
-	    $(QEMU_OPTS) $*)
-
 # Running
 QEMU_OPTS+=-device isa-debugcon,chardev=output -device isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel
Deleted patch
From: Fei Wu <fei2.wu@intel.com>

TBStats will be introduced to replace CONFIG_PROFILER entirely;
remove all of the CONFIG_PROFILER related code first.
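
The pattern deleted below is plain delta timing: snapshot a nanosecond
clock, run the region, accumulate the difference. A standalone sketch
of the idiom (names are mine; QEMU used profile_getclock() and
qatomic_set() for the same purpose):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t getclock_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main(void)
    {
        int64_t acc = 0;
        int64_t ti = getclock_ns();
        /* ... region being profiled ... */
        acc += getclock_ns() - ti;
        printf("%lld ns\n", (long long)acc);
        return 0;
    }
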
Signed-off-by: Vanderson M. do Rosario <vandersonmr2@gmail.com>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Fei Wu <fei2.wu@intel.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230607122411.3394702-2-fei2.wu@intel.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 meson.build                   |   2 -
 qapi/machine.json             |  18 ---
 include/qemu/timer.h          |   9 --
 include/tcg/tcg.h             |  26 -----
 accel/tcg/monitor.c           |  31 -----
 accel/tcg/tcg-accel-ops.c     |  10 --
 accel/tcg/translate-all.c     |  33 ------
 softmmu/runstate.c            |   9 --
 tcg/tcg.c                     | 214 ----------------------------------
 tests/qtest/qmp-cmd-test.c    |   3 -
 hmp-commands-info.hx          |  15 ---
 meson_options.txt             |   2 -
 scripts/meson-buildoptions.sh |   3 -
 13 files changed, 375 deletions(-)

diff --git a/meson.build b/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/meson.build
+++ b/meson.build
@@ -XXX,XX +XXX,XX @@ if numa.found()
                                dependencies: numa))
 endif
 config_host_data.set('CONFIG_OPENGL', opengl.found())
-config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
 config_host_data.set('CONFIG_RBD', rbd.found())
 config_host_data.set('CONFIG_RDMA', rdma.found())
 config_host_data.set('CONFIG_SAFESTACK', get_option('safe_stack'))
@@ -XXX,XX +XXX,XX @@ if 'objc' in all_languages
   summary_info += {'QEMU_OBJCFLAGS': ' '.join(qemu_common_flags)}
 endif
 summary_info += {'QEMU_LDFLAGS': ' '.join(qemu_ldflags)}
-summary_info += {'profiler': get_option('profiler')}
 summary_info += {'link-time optimization (LTO)': get_option('b_lto')}
 summary_info += {'PIE': get_option('b_pie')}
 summary_info += {'static build': get_option('prefer_static')}
diff --git a/qapi/machine.json b/qapi/machine.json
index XXXXXXX..XXXXXXX 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -XXX,XX +XXX,XX @@
   'if': 'CONFIG_TCG',
   'features': [ 'unstable' ] }

-##
-# @x-query-profile:
-#
-# Query TCG profiling information
-#
-# Features:
-#
-# @unstable: This command is meant for debugging.
-#
-# Returns: profile information
-#
-# Since: 6.2
-##
-{ 'command': 'x-query-profile',
-  'returns': 'HumanReadableText',
-  'if': 'CONFIG_TCG',
-  'features': [ 'unstable' ] }
-
 ##
 # @x-query-ramblock:
 #
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -XXX,XX +XXX,XX @@ static inline int64_t cpu_get_host_ticks(void)
 }
 #endif

-#ifdef CONFIG_PROFILER
-static inline int64_t profile_getclock(void)
-{
-    return get_clock();
-}
-
-extern int64_t dev_time;
-#endif
-
 #endif
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@ static inline TCGRegSet output_pref(const TCGOp *op, unsigned i)
     return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
 }

-typedef struct TCGProfile {
-    int64_t cpu_exec_time;
-    int64_t tb_count1;
-    int64_t tb_count;
-    int64_t op_count; /* total insn count */
-    int op_count_max; /* max insn per TB */
-    int temp_count_max;
-    int64_t temp_count;
-    int64_t del_op_count;
-    int64_t code_in_len;
-    int64_t code_out_len;
-    int64_t search_out_len;
-    int64_t interm_time;
-    int64_t code_time;
-    int64_t la_time;
-    int64_t opt_time;
-    int64_t restore_count;
-    int64_t restore_time;
-    int64_t table_op_count[NB_OPS];
-} TCGProfile;
-
 struct TCGContext {
     uint8_t *pool_cur, *pool_end;
     TCGPool *pool_first, *pool_current, *pool_first_large;
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
     tcg_insn_unit *code_buf; /* pointer for start of tb */
     tcg_insn_unit *code_ptr; /* pointer for running end of tb */

-#ifdef CONFIG_PROFILER
-    TCGProfile prof;
-#endif
-
 #ifdef CONFIG_DEBUG_TCG
     int goto_tb_issue_mask;
     const TCGOpcode *vecop_list;
@@ -XXX,XX +XXX,XX @@ static inline TCGv_ptr tcg_temp_new_ptr(void)
     return temp_tcgv_ptr(t);
 }

-int64_t tcg_cpu_exec_time(void);
 void tcg_dump_info(GString *buf);
 void tcg_dump_op_count(GString *buf);

diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/monitor.c
+++ b/accel/tcg/monitor.c
@@ -XXX,XX +XXX,XX @@ HumanReadableText *qmp_x_query_opcount(Error **errp)
     return human_readable_text_from_str(buf);
 }

-#ifdef CONFIG_PROFILER
-
-int64_t dev_time;
-
-HumanReadableText *qmp_x_query_profile(Error **errp)
-{
-    g_autoptr(GString) buf = g_string_new("");
-    static int64_t last_cpu_exec_time;
-    int64_t cpu_exec_time;
-    int64_t delta;
-
-    cpu_exec_time = tcg_cpu_exec_time();
-    delta = cpu_exec_time - last_cpu_exec_time;
-
-    g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
-                           dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
-    g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
-                           delta, delta / (double)NANOSECONDS_PER_SECOND);
-    last_cpu_exec_time = cpu_exec_time;
-    dev_time = 0;
-
-    return human_readable_text_from_str(buf);
-}
-#else
-HumanReadableText *qmp_x_query_profile(Error **errp)
-{
-    error_setg(errp, "Internal profiler not compiled");
-    return NULL;
-}
-#endif
-
 static void hmp_tcg_register(void)
 {
     monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -XXX,XX +XXX,XX @@ void tcg_cpus_destroy(CPUState *cpu)
 int tcg_cpus_exec(CPUState *cpu)
 {
     int ret;
-#ifdef CONFIG_PROFILER
-    int64_t ti;
-#endif
     assert(tcg_enabled());
-#ifdef CONFIG_PROFILER
-    ti = profile_getclock();
-#endif
     cpu_exec_start(cpu);
     ret = cpu_exec(cpu);
     cpu_exec_end(cpu);
-#ifdef CONFIG_PROFILER
-    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
-                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
-#endif
     return ret;
 }

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc)
 {
     uint64_t data[TARGET_INSN_START_WORDS];
-#ifdef CONFIG_PROFILER
-    TCGProfile *prof = &tcg_ctx->prof;
-    int64_t ti = profile_getclock();
-#endif
     int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);

     if (insns_left < 0) {
@@ -XXX,XX +XXX,XX @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     }

     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
-
-#ifdef CONFIG_PROFILER
-    qatomic_set(&prof->restore_time,
-                prof->restore_time + profile_getclock() - ti);
-    qatomic_set(&prof->restore_count, prof->restore_count + 1);
-#endif
 }

 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
     tcg_ctx->cpu = NULL;
     *max_insns = tb->icount;

-#ifdef CONFIG_PROFILER
-    qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
-    qatomic_set(&tcg_ctx->prof.interm_time,
-                tcg_ctx->prof.interm_time + profile_getclock() - *ti);
-    *ti = profile_getclock();
-#endif
-
     return tcg_gen_code(tcg_ctx, tb, pc);
 }

@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb_page_addr_t phys_pc;
     tcg_insn_unit *gen_code_buf;
     int gen_code_size, search_size, max_insns;
-#ifdef CONFIG_PROFILER
-    TCGProfile *prof = &tcg_ctx->prof;
-#endif
     int64_t ti;
     void *host_pc;

@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,

 tb_overflow:

-#ifdef CONFIG_PROFILER
-    /* includes aborted translations because of exceptions */
-    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
-    ti = profile_getclock();
-#endif
-
     trace_translate_block(tb, pc, tb->tc.ptr);

     gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      */
     perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));

-#ifdef CONFIG_PROFILER
-    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
-    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
-    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
-    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
-#endif
-
     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
         qemu_log_in_addr_range(pc)) {
         FILE *logfile = qemu_log_trylock();
diff --git a/softmmu/runstate.c b/softmmu/runstate.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/runstate.c
+++ b/softmmu/runstate.c
@@ -XXX,XX +XXX,XX @@ static bool main_loop_should_exit(int *status)
 int qemu_main_loop(void)
 {
     int status = EXIT_SUCCESS;
-#ifdef CONFIG_PROFILER
-    int64_t ti;
-#endif

     while (!main_loop_should_exit(&status)) {
-#ifdef CONFIG_PROFILER
-        ti = profile_getclock();
-#endif
         main_loop_wait(false);
-#ifdef CONFIG_PROFILER
-        dev_time += profile_getclock() - ti;
-#endif
     }

     return status;
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
     QTAILQ_REMOVE(&s->ops, op, link);
     QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
     s->nb_ops--;
-
-#ifdef CONFIG_PROFILER
-    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
-#endif
 }

 void tcg_remove_ops_after(TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
     tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
 }

-#ifdef CONFIG_PROFILER
-
-/* avoid copy/paste errors */
-#define PROF_ADD(to, from, field) \
-    do { \
-        (to)->field += qatomic_read(&((from)->field)); \
-    } while (0)
-
-#define PROF_MAX(to, from, field) \
-    do { \
-        typeof((from)->field) val__ = qatomic_read(&((from)->field)); \
-        if (val__ > (to)->field) { \
-            (to)->field = val__; \
-        } \
-    } while (0)
-
-/* Pass in a zero'ed @prof */
-static inline
-void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
-{
-    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
-    unsigned int i;
-
-    for (i = 0; i < n_ctxs; i++) {
-        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
-        const TCGProfile *orig = &s->prof;
-
-        if (counters) {
-            PROF_ADD(prof, orig, cpu_exec_time);
-            PROF_ADD(prof, orig, tb_count1);
-            PROF_ADD(prof, orig, tb_count);
-            PROF_ADD(prof, orig, op_count);
-            PROF_MAX(prof, orig, op_count_max);
-            PROF_ADD(prof, orig, temp_count);
-            PROF_MAX(prof, orig, temp_count_max);
-            PROF_ADD(prof, orig, del_op_count);
-            PROF_ADD(prof, orig, code_in_len);
-            PROF_ADD(prof, orig, code_out_len);
-            PROF_ADD(prof, orig, search_out_len);
-            PROF_ADD(prof, orig, interm_time);
-            PROF_ADD(prof, orig, code_time);
-            PROF_ADD(prof, orig, la_time);
-            PROF_ADD(prof, orig, opt_time);
-            PROF_ADD(prof, orig, restore_count);
-            PROF_ADD(prof, orig, restore_time);
-        }
-        if (table) {
-            int i;
-
-            for (i = 0; i < NB_OPS; i++) {
-                PROF_ADD(prof, orig, table_op_count[i]);
385
- }
386
- }
387
- }
388
-}
389
-
390
-#undef PROF_ADD
391
-#undef PROF_MAX
392
-
393
-static void tcg_profile_snapshot_counters(TCGProfile *prof)
394
-{
395
- tcg_profile_snapshot(prof, true, false);
396
-}
397
-
398
-static void tcg_profile_snapshot_table(TCGProfile *prof)
399
-{
400
- tcg_profile_snapshot(prof, false, true);
401
-}
402
-
403
-void tcg_dump_op_count(GString *buf)
404
-{
405
- TCGProfile prof = {};
406
- int i;
407
-
408
- tcg_profile_snapshot_table(&prof);
409
- for (i = 0; i < NB_OPS; i++) {
410
- g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name,
411
- prof.table_op_count[i]);
412
- }
413
-}
414
-
415
-int64_t tcg_cpu_exec_time(void)
416
-{
417
- unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
418
- unsigned int i;
419
- int64_t ret = 0;
420
-
421
- for (i = 0; i < n_ctxs; i++) {
422
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
423
- const TCGProfile *prof = &s->prof;
424
-
425
- ret += qatomic_read(&prof->cpu_exec_time);
426
- }
427
- return ret;
428
-}
429
-#else
430
void tcg_dump_op_count(GString *buf)
431
{
432
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
433
}
434
435
-int64_t tcg_cpu_exec_time(void)
436
-{
437
- error_report("%s: TCG profiler not compiled", __func__);
438
- exit(EXIT_FAILURE);
439
-}
440
-#endif
441
-
442
-
443
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
444
{
445
-#ifdef CONFIG_PROFILER
446
- TCGProfile *prof = &s->prof;
447
-#endif
448
int i, start_words, num_insns;
449
TCGOp *op;
450
451
-#ifdef CONFIG_PROFILER
452
- {
453
- int n = 0;
454
-
455
- QTAILQ_FOREACH(op, &s->ops, link) {
456
- n++;
457
- }
458
- qatomic_set(&prof->op_count, prof->op_count + n);
459
- if (n > prof->op_count_max) {
460
- qatomic_set(&prof->op_count_max, n);
461
- }
462
-
463
- n = s->nb_temps;
464
- qatomic_set(&prof->temp_count, prof->temp_count + n);
465
- if (n > prof->temp_count_max) {
466
- qatomic_set(&prof->temp_count_max, n);
467
- }
468
- }
469
-#endif
470
-
471
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
472
&& qemu_log_in_addr_range(pc_start))) {
473
FILE *logfile = qemu_log_trylock();
474
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
475
}
476
#endif
477
478
-#ifdef CONFIG_PROFILER
479
- qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
480
-#endif
481
-
482
tcg_optimize(s);
483
484
-#ifdef CONFIG_PROFILER
485
- qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
486
- qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
487
-#endif
488
-
489
reachable_code_pass(s);
490
liveness_pass_0(s);
491
liveness_pass_1(s);
492
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
493
}
494
}
495
496
-#ifdef CONFIG_PROFILER
497
- qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
498
-#endif
499
-
500
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
501
&& qemu_log_in_addr_range(pc_start))) {
502
FILE *logfile = qemu_log_trylock();
503
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
504
QTAILQ_FOREACH(op, &s->ops, link) {
505
TCGOpcode opc = op->opc;
506
507
-#ifdef CONFIG_PROFILER
508
- qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
509
-#endif
510
-
511
switch (opc) {
512
case INDEX_op_mov_i32:
513
case INDEX_op_mov_i64:
514
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
515
return tcg_current_code_size(s);
516
}
517
518
-#ifdef CONFIG_PROFILER
519
-void tcg_dump_info(GString *buf)
520
-{
521
- TCGProfile prof = {};
522
- const TCGProfile *s;
523
- int64_t tb_count;
524
- int64_t tb_div_count;
525
- int64_t tot;
526
-
527
- tcg_profile_snapshot_counters(&prof);
528
- s = &prof;
529
- tb_count = s->tb_count;
530
- tb_div_count = tb_count ? tb_count : 1;
531
- tot = s->interm_time + s->code_time;
532
-
533
- g_string_append_printf(buf, "JIT cycles %" PRId64
534
- " (%0.3f s at 2.4 GHz)\n",
535
- tot, tot / 2.4e9);
536
- g_string_append_printf(buf, "translated TBs %" PRId64
537
- " (aborted=%" PRId64 " %0.1f%%)\n",
538
- tb_count, s->tb_count1 - tb_count,
539
- (double)(s->tb_count1 - s->tb_count)
540
- / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
541
- g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n",
542
- (double)s->op_count / tb_div_count, s->op_count_max);
543
- g_string_append_printf(buf, "deleted ops/TB %0.2f\n",
544
- (double)s->del_op_count / tb_div_count);
545
- g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n",
546
- (double)s->temp_count / tb_div_count,
547
- s->temp_count_max);
548
- g_string_append_printf(buf, "avg host code/TB %0.1f\n",
549
- (double)s->code_out_len / tb_div_count);
550
- g_string_append_printf(buf, "avg search data/TB %0.1f\n",
551
- (double)s->search_out_len / tb_div_count);
552
-
553
- g_string_append_printf(buf, "cycles/op %0.1f\n",
554
- s->op_count ? (double)tot / s->op_count : 0);
555
- g_string_append_printf(buf, "cycles/in byte %0.1f\n",
556
- s->code_in_len ? (double)tot / s->code_in_len : 0);
557
- g_string_append_printf(buf, "cycles/out byte %0.1f\n",
558
- s->code_out_len ? (double)tot / s->code_out_len : 0);
559
- g_string_append_printf(buf, "cycles/search byte %0.1f\n",
560
- s->search_out_len ?
561
- (double)tot / s->search_out_len : 0);
562
- if (tot == 0) {
563
- tot = 1;
564
- }
565
- g_string_append_printf(buf, " gen_interm time %0.1f%%\n",
566
- (double)s->interm_time / tot * 100.0);
567
- g_string_append_printf(buf, " gen_code time %0.1f%%\n",
568
- (double)s->code_time / tot * 100.0);
569
- g_string_append_printf(buf, "optim./code time %0.1f%%\n",
570
- (double)s->opt_time / (s->code_time ?
571
- s->code_time : 1)
572
- * 100.0);
573
- g_string_append_printf(buf, "liveness/code time %0.1f%%\n",
574
- (double)s->la_time / (s->code_time ?
575
- s->code_time : 1) * 100.0);
576
- g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n",
577
- s->restore_count);
578
- g_string_append_printf(buf, " avg cycles %0.1f\n",
579
- s->restore_count ?
580
- (double)s->restore_time / s->restore_count : 0);
581
-}
582
-#else
583
void tcg_dump_info(GString *buf)
584
{
585
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
586
}
587
-#endif
588
589
#ifdef ELF_HOST_MACHINE
590
/* In order to use this feature, the backend needs to do three things:
591
diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c
592
index XXXXXXX..XXXXXXX 100644
593
--- a/tests/qtest/qmp-cmd-test.c
594
+++ b/tests/qtest/qmp-cmd-test.c
595
@@ -XXX,XX +XXX,XX @@ static int query_error_class(const char *cmd)
596
{ "query-balloon", ERROR_CLASS_DEVICE_NOT_ACTIVE },
597
{ "query-hotpluggable-cpus", ERROR_CLASS_GENERIC_ERROR },
598
{ "query-vm-generation-id", ERROR_CLASS_GENERIC_ERROR },
599
-#ifndef CONFIG_PROFILER
600
- { "x-query-profile", ERROR_CLASS_GENERIC_ERROR },
601
-#endif
602
/* Only valid with a USB bus added */
603
{ "x-query-usb", ERROR_CLASS_GENERIC_ERROR },
604
/* Only valid with accel=tcg */
605
diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx
606
index XXXXXXX..XXXXXXX 100644
607
--- a/hmp-commands-info.hx
608
+++ b/hmp-commands-info.hx
609
@@ -XXX,XX +XXX,XX @@ SRST
610
Show host USB devices.
611
ERST
612
613
-#if defined(CONFIG_TCG)
614
- {
615
- .name = "profile",
616
- .args_type = "",
617
- .params = "",
618
- .help = "show profiling information",
619
- .cmd_info_hrt = qmp_x_query_profile,
620
- },
621
-#endif
622
-
623
-SRST
624
- ``info profile``
625
- Show profiling information.
626
-ERST
627
-
628
{
629
.name = "capture",
630
.args_type = "",
631
diff --git a/meson_options.txt b/meson_options.txt
632
index XXXXXXX..XXXXXXX 100644
633
--- a/meson_options.txt
634
+++ b/meson_options.txt
635
@@ -XXX,XX +XXX,XX @@ option('qom_cast_debug', type: 'boolean', value: true,
636
option('gprof', type: 'boolean', value: false,
637
description: 'QEMU profiling with gprof',
638
deprecated: true)
639
-option('profiler', type: 'boolean', value: false,
640
- description: 'profiler support')
641
option('slirp_smbd', type : 'feature', value : 'auto',
642
description: 'use smbd (at path --smbd=*) in slirp networking')
643
644
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
645
index XXXXXXX..XXXXXXX 100644
646
--- a/scripts/meson-buildoptions.sh
647
+++ b/scripts/meson-buildoptions.sh
648
@@ -XXX,XX +XXX,XX @@ meson_options_help() {
649
printf "%s\n" ' jemalloc/system/tcmalloc)'
650
printf "%s\n" ' --enable-module-upgrades try to load modules from alternate paths for'
651
printf "%s\n" ' upgrades'
652
- printf "%s\n" ' --enable-profiler profiler support'
653
printf "%s\n" ' --enable-rng-none dummy RNG, avoid using /dev/(u)random and'
654
printf "%s\n" ' getrandom()'
655
printf "%s\n" ' --enable-safe-stack SafeStack Stack Smash Protection (requires'
656
@@ -XXX,XX +XXX,XX @@ _meson_option_parse() {
657
--with-pkgversion=*) quote_sh "-Dpkgversion=$2" ;;
658
--enable-png) printf "%s" -Dpng=enabled ;;
659
--disable-png) printf "%s" -Dpng=disabled ;;
660
- --enable-profiler) printf "%s" -Dprofiler=true ;;
661
- --disable-profiler) printf "%s" -Dprofiler=false ;;
662
--enable-pvrdma) printf "%s" -Dpvrdma=enabled ;;
663
--disable-pvrdma) printf "%s" -Dpvrdma=disabled ;;
664
--enable-qcow1) printf "%s" -Dqcow1=enabled ;;
665
--
666
2.34.1
667
668
diff view generated by jsdifflib
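
As context for the deletion above: the profiler aggregated per-TCGContext counters into one snapshot, summing monotonic counters and taking the maximum of high-water marks. A minimal, self-contained sketch of that aggregation pattern (hypothetical names, not QEMU API):

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One counter block per translation context; all names hypothetical. */
    struct prof {
        _Atomic int64_t op_count;       /* monotonic: summed */
        _Atomic int64_t op_count_max;   /* high-water mark: maximized */
    };

    #define N_CTXS 4
    static struct prof ctxs[N_CTXS];

    static void snapshot(int64_t *count, int64_t *max)
    {
        for (int i = 0; i < N_CTXS; i++) {
            *count += atomic_load(&ctxs[i].op_count);       /* PROF_ADD */
            int64_t v = atomic_load(&ctxs[i].op_count_max); /* PROF_MAX */
            if (v > *max) {
                *max = v;
            }
        }
    }

    int main(void)
    {
        atomic_store(&ctxs[0].op_count, 10);
        atomic_store(&ctxs[1].op_count, 5);
        atomic_store(&ctxs[1].op_count_max, 7);
        int64_t count = 0, max = 0;
        snapshot(&count, &max);
        printf("ops=%" PRId64 " max=%" PRId64 "\n", count, max); /* ops=15 max=7 */
        return 0;
    }
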
Deleted patch

From: Max Chou <max.chou@sifive.com>

The 5th parameter of tcg_gen_gvec_2s should be replaced by the
temporary tmp variable in the tcg_gen_gvec_andcs function.

Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Max Chou <max.chou@sifive.com>
Message-Id: <20230622161646.32005-9-max.chou@sifive.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op-gvec.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,

TCGv_i64 tmp = tcg_temp_ebb_new_i64();
tcg_gen_dup_i64(vece, tmp, c);
- tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g);
+ tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
tcg_temp_free_i64(tmp);
}

--
2.34.1
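
The one-liner above is easy to gloss over, so here is roughly how the fixed function body reads (a sketch: the GVecGen2s descriptor is elided, and the signature should be checked against tcg/tcg-op-gvec.h):

    void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
                            TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
    {
        static const GVecGen2s g = { 0 /* real descriptor elided in this sketch */ };

        TCGv_i64 tmp = tcg_temp_ebb_new_i64();
        tcg_gen_dup_i64(vece, tmp, c);              /* broadcast c into tmp */
        /* The dup'ed value lives in tmp; passing c here, as the old code
         * did, hands the un-broadcast scalar to the expander. */
        tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &g);
        tcg_temp_free_i64(tmp);
    }
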
Deleted patch

The microblaze architecture does not reorder instructions.
While there is an MBAR wait-for-data-access instruction,
this concerns synchronizing with DMA.

This should have been defined when enabling MTTCG.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar@zeroasic.com>
Fixes: d449561b130 ("configure: microblaze: Enable mttcg")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/microblaze/cpu.h | 3 +++
1 file changed, 3 insertions(+)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -XXX,XX +XXX,XX @@
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"

+/* MicroBlaze is always in-order. */
+#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL
+
typedef struct CPUArchState CPUMBState;
#if !defined(CONFIG_USER_ONLY)
#include "mmu.h"
--
2.34.1
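
TCG_GUEST_DEFAULT_MO is a mask of TCGBar ordering bits, so TCG_MO_ALL here states that MicroBlaze never reorders any load/store pair. For reference, a sketch of the bit values as defined in tcg/tcg-mo.h at the time (worth re-checking against the tree):

    /* Sketch of the ordering bits; the real enum is TCGBar. */
    typedef enum {
        TCG_MO_LD_LD = 0x01,  /* earlier load ordered before later load */
        TCG_MO_ST_LD = 0x02,  /* earlier store ordered before later load */
        TCG_MO_LD_ST = 0x04,  /* earlier load ordered before later store */
        TCG_MO_ST_ST = 0x08,  /* earlier store ordered before later store */
        TCG_MO_ALL   = 0x0F,  /* OR of all four: fully in-order */
    } TCGBarSketch;
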
Deleted patch

The virtio devices require proper memory ordering between
the vcpus and the iothreads.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tcg-op.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_br(TCGLabel *l)

void tcg_gen_mb(TCGBar mb_type)
{
- if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
+#ifdef CONFIG_USER_ONLY
+ bool parallel = tcg_ctx->gen_tb->cflags & CF_PARALLEL;
+#else
+ /*
+ * It is tempting to elide the barrier in a uniprocessor context.
+ * However, even with a single cpu we have i/o threads running in
+ * parallel, and lack of memory order can result in e.g. virtio
+ * queue entries being read incorrectly.
+ */
+ bool parallel = true;
+#endif
+
+ if (parallel) {
tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
--
2.34.1
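
The comment added in the patch is the heart of it: even with a single vcpu, QEMU's i/o threads run concurrently on host memory. A self-contained producer/consumer sketch (virtio-flavored but hypothetical) of the failure the barrier prevents:

    #include <stdatomic.h>
    #include <stdint.h>

    /* A toy shared ring between a vcpu thread and an i/o thread. */
    struct ring {
        uint64_t desc[256];
        _Atomic uint16_t avail_idx;
    };

    /* vcpu side: fill the descriptor, then publish its index.
     * Without the release (store-store) ordering, the i/o thread can
     * observe the new index before the descriptor contents; this is
     * the bug tcg_gen_mb() must prevent even in a uniprocessor guest. */
    static void vcpu_publish(struct ring *r, uint16_t slot, uint64_t d)
    {
        r->desc[slot] = d;
        atomic_store_explicit(&r->avail_idx, slot + 1, memory_order_release);
    }

    /* i/o thread side: wait for the index, then read the descriptor. */
    static uint64_t iothread_fetch(struct ring *r, uint16_t slot)
    {
        while (atomic_load_explicit(&r->avail_idx, memory_order_acquire)
               <= slot) {
            /* spin */
        }
        return r->desc[slot];
    }
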
Deleted patch

Bring the helpers into line with the rest of tcg in respecting
guest memory ordering.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/internal.h | 34 ++++++++++++++++++++++++++++++++++
accel/tcg/cputlb.c | 10 ++++++++++
accel/tcg/user-exec.c | 10 ++++++++++
3 files changed, 54 insertions(+)

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -XXX,XX +XXX,XX @@ extern int64_t max_advance;

extern bool one_insn_per_tb;

+/**
+ * tcg_req_mo:
+ * @type: TCGBar
+ *
+ * Filter @type to the barrier that is required for the guest
+ * memory ordering vs the host memory ordering. A non-zero
+ * result indicates that some barrier is required.
+ *
+ * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
+ * guest requires strict ordering.
+ *
+ * This is a macro so that it's constant even without optimization.
+ */
+#ifdef TCG_GUEST_DEFAULT_MO
+# define tcg_req_mo(type) \
+ ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
+#else
+# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
+#endif
+
+/**
+ * cpu_req_mo:
+ * @type: TCGBar
+ *
+ * If tcg_req_mo indicates a barrier for @type is required
+ * for the guest memory model, issue a host memory barrier.
+ */
+#define cpu_req_mo(type) \
+ do { \
+ if (tcg_req_mo(type)) { \
+ smp_mb(); \
+ } \
+ } while (0)
+
#endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
MMULookupLocals l;
bool crosspage;

+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);

@@ -XXX,XX +XXX,XX @@ static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
uint16_t ret;
uint8_t a, b;

+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -XXX,XX +XXX,XX @@ static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
bool crosspage;
uint32_t ret;

+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
bool crosspage;
uint64_t ret;

+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
Int128 ret;
int first;

+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
/* Perform the load host endian. */
@@ -XXX,XX +XXX,XX @@ void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
bool crosspage;

tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);

@@ -XXX,XX +XXX,XX @@ static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
bool crosspage;
uint8_t a, b;

+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -XXX,XX +XXX,XX @@ static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
MMULookupLocals l;
bool crosspage;

+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -XXX,XX +XXX,XX @@ static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
MMULookupLocals l;
bool crosspage;

+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -XXX,XX +XXX,XX @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
uint64_t a, b;
int first;

+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
/* Swap to host endian if necessary, then store. */
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
uint8_t ret;

tcg_debug_assert((mop & MO_SIZE) == MO_8);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr);
clear_helper_retaddr();
@@ -XXX,XX +XXX,XX @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
uint16_t ret;

tcg_debug_assert((mop & MO_SIZE) == MO_16);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();
@@ -XXX,XX +XXX,XX @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
uint32_t ret;

tcg_debug_assert((mop & MO_SIZE) == MO_32);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
uint64_t ret;

tcg_debug_assert((mop & MO_SIZE) == MO_64);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
Int128 ret;

tcg_debug_assert((mop & MO_SIZE) == MO_128);
+ cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr();
@@ -XXX,XX +XXX,XX @@ static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
void *haddr;

tcg_debug_assert((mop & MO_SIZE) == MO_8);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
@@ -XXX,XX +XXX,XX @@ static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
void *haddr;

tcg_debug_assert((mop & MO_SIZE) == MO_16);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
void *haddr;

tcg_debug_assert((mop & MO_SIZE) == MO_32);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
void *haddr;

tcg_debug_assert((mop & MO_SIZE) == MO_64);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
@@ -XXX,XX +XXX,XX @@ static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
void *haddr;

tcg_debug_assert((mop & MO_SIZE) == MO_128);
+ cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

if (mop & MO_BSWAP) {
--
2.34.1
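
To make the macro arithmetic concrete, here is a worked example as compilable assertions. The TCG_MO_* values mirror tcg/tcg-mo.h; the x86 mask (everything except store-then-load) matches what the i386 front and back ends define at the time, while a weakly-ordered host such as aarch64 uses 0:

    #include <assert.h>

    #define TCG_MO_LD_LD 0x01
    #define TCG_MO_ST_LD 0x02
    #define TCG_MO_LD_ST 0x04
    #define TCG_MO_ST_ST 0x08
    #define TCG_MO_ALL   0x0f

    #define X86_MO  (TCG_MO_ALL & ~TCG_MO_ST_LD)   /* 0x0d */
    #define WEAK_MO 0                              /* e.g. an aarch64 host */

    int main(void)
    {
        /* What the load helpers above request before mmu_lookup(). */
        int type = TCG_MO_LD_LD | TCG_MO_ST_LD;    /* 0x03 */

        /* x86 guest on x86 host: the host already honors everything the
         * guest requires, so tcg_req_mo() is 0 and no fence is issued. */
        assert((type & X86_MO & ~X86_MO) == 0);

        /* x86 guest on a weakly-ordered host: the load-load requirement
         * survives the filter and smp_mb() is issued. */
        assert((type & X86_MO & ~WEAK_MO) == TCG_MO_LD_LD);
        return 0;
    }
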
Deleted patch

We now issue host memory barriers to match the guest memory order.
Continue to disable MTTCG only if the guest has not been ported.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/tcg-all.c | 39 ++++++++++-----------------------------
1 file changed, 10 insertions(+), 29 deletions(-)

diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -XXX,XX +XXX,XX @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
* they can set the appropriate CONFIG flags in ${target}-softmmu.mak
*
* Once a guest architecture has been converted to the new primitives
- * there are two remaining limitations to check.
- *
- * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
- * - The host must have a stronger memory order than the guest
- *
- * It may be possible in future to support strong guests on weak hosts
- * but that will require tagging all load/stores in a guest with their
- * implicit memory order requirements which would likely slow things
- * down a lot.
+ * there is one remaining limitation to check:
+ * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
*/

-static bool check_tcg_memory_orders_compatible(void)
-{
-#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
- return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
-#else
- return false;
-#endif
-}
-
static bool default_mttcg_enabled(void)
{
if (icount_enabled() || TCG_OVERSIZED_GUEST) {
return false;
- } else {
-#ifdef TARGET_SUPPORTS_MTTCG
- return check_tcg_memory_orders_compatible();
-#else
- return false;
-#endif
}
+#ifdef TARGET_SUPPORTS_MTTCG
+# ifndef TCG_GUEST_DEFAULT_MO
+# error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
+# endif
+ return true;
+#else
+ return false;
+#endif
}

static void tcg_accel_instance_init(Object *obj)
@@ -XXX,XX +XXX,XX @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
warn_report("Guest not yet converted to MTTCG - "
"you may get unexpected results");
#endif
- if (!check_tcg_memory_orders_compatible()) {
- warn_report("Guest expects a stronger memory ordering "
- "than the host provides");
- error_printf("This may cause strange/hard to debug errors\n");
- }
s->mttcg_enabled = true;
}
} else if (strcmp(value, "single") == 0) {
--
2.34.1
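
The resulting default is compact enough to restate as a free-standing sketch (hypothetical function, not the QEMU code itself):

    #include <stdbool.h>

    /* Sketch of default_mttcg_enabled() after this patch: memory-order
     * compatibility is no longer consulted at runtime, because the
     * required barriers are now emitted as needed. */
    static bool default_mttcg_enabled_sketch(bool icount, bool oversized_guest,
                                             bool target_supports_mttcg)
    {
        if (icount || oversized_guest) {
            return false;   /* record/replay and 64-on-32 stay single-threaded */
        }
        /* TARGET_SUPPORTS_MTTCG now implies TCG_GUEST_DEFAULT_MO,
         * enforced by the #error at compile time. */
        return target_supports_mttcg;
    }
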
Deleted patch

We have run out of bits we can use within the CPUTLBEntry comparators,
as TLB_FLAGS_MASK cannot overlap alignment.

Store slow_flags[] in CPUTLBEntryFull, and merge with the flags from
the comparator. A new TLB_FORCE_SLOW bit is set within the comparator
as an indication that the slow path must be used.

Move TLB_BSWAP to TLB_SLOW_FLAGS_MASK. Since we are out of bits,
we cannot create a new bit without moving an old one.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 21 +++++++--
include/exec/cpu-defs.h | 6 +++
include/hw/core/cpu.h | 1 +
accel/tcg/cputlb.c | 98 ++++++++++++++++++++++++-----------------
4 files changed, 82 insertions(+), 44 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap. */
-#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))

-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
* in a TCG backend.
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+ | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap. */
+#define TLB_BSWAP (1 << 0)
+
+#define TLB_SLOW_FLAGS_MASK TLB_BSWAP
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);

/**
* tlb_hit_page: return true if page aligned @addr is a hit against the
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
/* @lg_page_size contains the log2 of the page size. */
uint8_t lg_page_size;

+ /*
+ * Additional tlb flags for use by the slow path. If non-zero,
+ * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+ */
+ uint8_t slow_flags[MMU_ACCESS_COUNT];
+
/*
* Allow target-specific additions to this structure.
* This may be used to cache items from the guest cpu
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef enum MMUAccessType {
MMU_DATA_LOAD = 0,
MMU_DATA_STORE = 1,
MMU_INST_FETCH = 2
+#define MMU_ACCESS_COUNT 3
} MMUAccessType;

typedef struct CPUWatchpoint CPUWatchpoint;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}

+static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
+ target_ulong address, int flags,
+ MMUAccessType access_type, bool enable)
+{
+ if (enable) {
+ address |= flags & TLB_FLAGS_MASK;
+ flags &= TLB_SLOW_FLAGS_MASK;
+ if (flags) {
+ address |= TLB_FORCE_SLOW;
+ }
+ } else {
+ address = -1;
+ flags = 0;
+ }
+ ent->addr_idx[access_type] = address;
+ full->slow_flags[access_type] = flags;
+}
+
/*
* Add a new TLB entry. At most one entry for a given virtual address
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
CPUTLB *tlb = env_tlb(env);
CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section;
- unsigned int index;
- vaddr address;
- vaddr write_address;
+ unsigned int index, read_flags, write_flags;
uintptr_t addend;
CPUTLBEntry *te, tn;
hwaddr iotlb, xlat, sz, paddr_page;
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
" prot=%x idx=%d\n",
addr, full->phys_addr, prot, mmu_idx);

- address = addr_page;
+ read_flags = 0;
if (full->lg_page_size < TARGET_PAGE_BITS) {
/* Repeat the MMU check and TLB fill on every access. */
- address |= TLB_INVALID_MASK;
+ read_flags |= TLB_INVALID_MASK;
}
if (full->attrs.byte_swap) {
- address |= TLB_BSWAP;
+ read_flags |= TLB_BSWAP;
}

is_ram = memory_region_is_ram(section->mr);
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
addend = 0;
}

- write_address = address;
+ write_flags = read_flags;
if (is_ram) {
iotlb = memory_region_get_ram_addr(section->mr) + xlat;
/*
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
*/
if (prot & PAGE_WRITE) {
if (section->readonly) {
- write_address |= TLB_DISCARD_WRITE;
+ write_flags |= TLB_DISCARD_WRITE;
} else if (cpu_physical_memory_is_clean(iotlb)) {
- write_address |= TLB_NOTDIRTY;
+ write_flags |= TLB_NOTDIRTY;
}
}
} else {
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* Reads to romd devices go through the ram_ptr found above,
* but of course reads to I/O must go through MMIO.
*/
- write_address |= TLB_MMIO;
+ write_flags |= TLB_MMIO;
if (!is_romd) {
- address = write_address;
+ read_flags = write_flags;
}
}

@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* TARGET_PAGE_BITS, and either
* + the ram_addr_t of the page base of the target RAM (RAM)
* + the offset within section->mr of the page base (I/O, ROMD)
- * We subtract the vaddr_page (which is page aligned and thus won't
+ * We subtract addr_page (which is page aligned and thus won't
* disturb the low bits) to give an offset which can be added to the
* (non-page-aligned) vaddr of the eventual memory access to get
* the MemoryRegion offset for the access. Note that the vaddr we
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
*/
desc->fulltlb[index] = *full;
- desc->fulltlb[index].xlat_section = iotlb - addr_page;
- desc->fulltlb[index].phys_addr = paddr_page;
+ full = &desc->fulltlb[index];
+ full->xlat_section = iotlb - addr_page;
+ full->phys_addr = paddr_page;

/* Now calculate the new entry */
tn.addend = addend - addr_page;
- if (prot & PAGE_READ) {
- tn.addr_read = address;
- if (wp_flags & BP_MEM_READ) {
- tn.addr_read |= TLB_WATCHPOINT;
- }
- } else {
- tn.addr_read = -1;
- }

- if (prot & PAGE_EXEC) {
- tn.addr_code = address;
- } else {
- tn.addr_code = -1;
- }
+ tlb_set_compare(full, &tn, addr_page, read_flags,
+ MMU_INST_FETCH, prot & PAGE_EXEC);

- tn.addr_write = -1;
- if (prot & PAGE_WRITE) {
- tn.addr_write = write_address;
- if (prot & PAGE_WRITE_INV) {
- tn.addr_write |= TLB_INVALID_MASK;
- }
- if (wp_flags & BP_MEM_WRITE) {
- tn.addr_write |= TLB_WATCHPOINT;
- }
+ if (wp_flags & BP_MEM_READ) {
+ read_flags |= TLB_WATCHPOINT;
}
+ tlb_set_compare(full, &tn, addr_page, read_flags,
+ MMU_DATA_LOAD, prot & PAGE_READ);
+
+ if (prot & PAGE_WRITE_INV) {
+ write_flags |= TLB_INVALID_MASK;
+ }
+ if (wp_flags & BP_MEM_WRITE) {
+ write_flags |= TLB_WATCHPOINT;
+ }
+ tlb_set_compare(full, &tn, addr_page, write_flags,
+ MMU_DATA_STORE, prot & PAGE_WRITE);

copy_tlb_helper_locked(te, &tn);
tlb_n_used_entries_inc(env, mmu_idx);
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
vaddr page_addr = addr & TARGET_PAGE_MASK;
- int flags = TLB_FLAGS_MASK;
+ int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+ CPUTLBEntryFull *full;

if (!tlb_hit_page(tlb_addr, page_addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, vaddr addr,
}
flags &= tlb_addr;

- *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ flags |= full->slow_flags[access_type];

/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -XXX,XX +XXX,XX @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
+ CPUTLBEntryFull *full;
+ int flags;

/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
@@ -XXX,XX +XXX,XX @@ static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}

- data->flags = tlb_addr & TLB_FLAGS_MASK;
- data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
+ flags |= full->slow_flags[access_type];
+
+ data->full = full;
+ data->flags = flags;
/* Compute haddr speculatively; depending on flags it might be invalid. */
data->haddr = (void *)((uintptr_t)addr + entry->addend);

--
2.34.1
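
A condensed model of the two-level flag scheme may help. Fast flags stay in the low bits of the comparator; slow flags move to CPUTLBEntryFull, with TLB_FORCE_SLOW left in the comparator as the "look aside" marker. A hypothetical, simplified version of what mmu_lookup1() now computes:

    #include <stdint.h>

    /* Simplified stand-ins, not QEMU's real values. */
    #define TLB_FAST_MASK   0x3e    /* role of TLB_FLAGS_MASK */
    #define TLB_FORCE_SLOW  0x02    /* one bit within the fast mask */

    enum { LOAD, STORE, FETCH, ACCESS_COUNT };   /* MMU_ACCESS_COUNT == 3 */

    struct entry_full {
        uint8_t slow_flags[ACCESS_COUNT];   /* e.g. TLB_BSWAP lives here */
    };

    /* Combine both flag sets for one access: take the fast bits from the
     * comparator, drop the FORCE_SLOW marker itself, then OR in the slow
     * bits stored beside the entry. */
    static int combined_flags(uint64_t tlb_addr,
                              const struct entry_full *full, int access_type)
    {
        int flags = tlb_addr & (TLB_FAST_MASK & ~TLB_FORCE_SLOW);
        return flags | full->slow_flags[access_type];
    }
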
Deleted patch

This frees up one bit of the primary tlb flags without
impacting the TLB_NOTDIRTY logic.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 8 ++++----
accel/tcg/cputlb.c | 18 ++++++++++++++----
2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if TLB entry writes ignored. */
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+ | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/*
* Flags stored in CPUTLBEntryFull.slow_flags[x].
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
*/
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)

-#define TLB_SLOW_FLAGS_MASK TLB_BSWAP
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
*/
goto stop_the_world;
}
- /* Collect TLB_WATCHPOINT for read. */
+ /* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;

/* Notice an IO access or a needs-MMU-lookup access */
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
notdirty_write(env_cpu(env), addr, size, full, retaddr);
}

- if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
- cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs,
- BP_MEM_READ | BP_MEM_WRITE, retaddr);
+ if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+ int wp_flags = 0;
+
+ if (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT) {
+ wp_flags |= BP_MEM_WRITE;
+ }
+ if (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT) {
+ wp_flags |= BP_MEM_READ;
+ }
+ if (wp_flags) {
+ cpu_check_watchpoint(env_cpu(env), addr, size,
+ full->attrs, wp_flags, retaddr);
+ }
}

return hostaddr;
--
2.34.1
Deleted patch

Move to fill a hole in the set of bits.
Reduce the total number of tlb bits by 1.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 4 ++--
tcg/tcg-op-ldst.c | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
+/* Set if TLB entry writes ignored. */
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-/* Set if TLB entry writes ignored. */
-#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))

/*
* Use this mask to check interception with an alignment mask
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -XXX,XX +XXX,XX @@ static void check_max_alignment(unsigned a_bits)
* The requested alignment cannot overlap the TLB flags.
* FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
*/
- tcg_debug_assert(a_bits + 6 <= tcg_ctx->page_bits);
+ tcg_debug_assert(a_bits + 5 <= tcg_ctx->page_bits);
#endif
}

--
2.34.1
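
With the renumbering, the five primary flags sit contiguously just below the page bits, which is where the updated "+ 5" in the assertion comes from. A worked example, assuming a hypothetical 10-bit minimum page size:

    /* Hypothetical TARGET_PAGE_BITS_MIN == 10 (1 KiB pages). */
    #define TARGET_PAGE_BITS_MIN 10

    #define TLB_INVALID_MASK  (1 << (TARGET_PAGE_BITS_MIN - 1))   /* bit 9 */
    #define TLB_NOTDIRTY      (1 << (TARGET_PAGE_BITS_MIN - 2))   /* bit 8 */
    #define TLB_MMIO          (1 << (TARGET_PAGE_BITS_MIN - 3))   /* bit 7 */
    #define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))   /* bit 6 */
    #define TLB_FORCE_SLOW    (1 << (TARGET_PAGE_BITS_MIN - 5))   /* bit 5 */

    /* An alignment check stores (1 << a_bits) - 1 in the comparator's low
     * bits, so it must fit below bit 5: a_bits + 5 <= page_bits, i.e. up
     * to 32-byte alignment can be checked for free with 10-bit pages. */
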